intel_display.c 484 KB

[source listing omitted: intel_display.c (14,308 lines); only the code viewer's line-number gutter was captured, not the code itself]
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func;
}
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_i915_private *dev_priv,
			     struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
static int bxt_calc_cdclk(int max_pixclk);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
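
/*
 * Editorial worked example (not part of the original source): the CCK
 * divider field encodes the ratio 2 * ref / (divider + 1). With a
 * 1.6 GHz HPLL reference (ref_freq = 1600000 kHz) and a divider field
 * of 15, the helper above returns
 *
 *	DIV_ROUND_CLOSEST(1600000 << 1, 15 + 1) = 3200000 / 16
 *						= 200000 kHz
 */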
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static int
intel_pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}

static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	/* RAWCLK_FREQ_VLV register updated from power well code */
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}

static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
	uint32_t clkcfg;

	/* hrawclk is 1/4 of the FSB frequency */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100000;
	case CLKCFG_FSB_533:
		return 133333;
	case CLKCFG_FSB_667:
		return 166667;
	case CLKCFG_FSB_800:
		return 200000;
	case CLKCFG_FSB_1067:
		return 266667;
	case CLKCFG_FSB_1333:
		return 333333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400000;
	default:
		return 133333;
	}
}
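
/*
 * Editorial sanity check (not in the original source): since hrawclk is
 * a quarter of the FSB frequency, the table above is just FSB / 4 in
 * kHz, e.g. CLKCFG_FSB_800 means an 800 MHz FSB and 800000 / 4 =
 * 200000 kHz; the guessed 1600 MHz entries likewise map to 400000 kHz.
 */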
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
	else
		return; /* no rawclk on other platforms, or no need to know it */

	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
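
/*
 * Editorial worked example (not from the original source), using
 * plausible i9xx SDVO divisors to illustrate the equations above:
 * with refclk = 96000 kHz, m1 = 11, m2 = 5, n = 1, p1 = 2, p2 = 10:
 *
 *	m   = 5 * (11 + 2) + (5 + 2)	= 72
 *	vco = 96000 * 72 / (1 + 2)	= 2304000 kHz
 *	p   = 2 * 10			= 20
 *	dot = 2304000 / 20		= 115200 kHz
 *
 * all of which land inside the intel_limits_i9xx_sdvo ranges defined
 * earlier in this file.
 */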
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
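
/*
 * Editorial note (not in the original source): on CHV, m2 is effectively
 * a fixed-point value with 22 fractional bits, which is why the
 * intel_limits_chv bounds read "24 << 22".."175 << 22" and why the VCO
 * math above divides by (n << 22). For example, m1 = 2, m2 = 150 << 22,
 * n = 1 and refclk = 19200 kHz give
 *
 *	vco = 19200 * 2 * (150 << 22) / (1 << 22) = 5760000 kHz
 *
 * which sits inside the 4800000..6480000 kHz CHV VCO range.
 */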
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_BROXTON(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
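
/*
 * Editorial usage sketch (hypothetical, for illustration only): a mode
 * computation path would pick a limit table for the platform/output and
 * then ask for divisors matching the target dot clock, roughly:
 *
 *	struct dpll clock;
 *
 *	if (!i9xx_find_best_dpll(&intel_limits_i9xx_sdvo, crtc_state,
 *				 crtc_state->port_clock, 96000,
 *				 NULL, &clock))
 *		return -EINVAL;
 *
 * The real callers appear later in this file and pass the refclk that
 * matches the detected reference source.
 */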
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (m2 + 2) / n / p1 / p2 (Pineview has a single combined m
 * divider, treated as m2, and its n is a ring counter; see the limit
 * tables above).
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
/*
 * Check whether the calculated PLL configuration is more optimal than the
 * best configuration and error found so far. The calculated error is
 * returned via @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
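
/*
 * Editorial worked example (not in the original source): for a target of
 * 162000 kHz and a candidate dot clock of 162081 kHz,
 *
 *	error_ppm = 1000000 * |162000 - 162081| / 162000 = 500 ppm
 *
 * Per the rules above, a candidate within 100 ppm that also has a bigger
 * P wins outright; otherwise a candidate must improve on the best error
 * found so far by more than 10 ppm to replace it.
 */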
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the fast clock
 * equation: refclk * (m1 * m2) / n / p1 / p2; the pipe clock is one
 * fifth of it.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1, m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the fast clock
 * equation: refclk * (m1 * m2) / n / p1 / p2, where m2 carries 22
 * fractional bits; the pipe clock is one fifth of it.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	uint64_t m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware documentation, n is always set to 1 and m1
	 * is always set to 2. If we ever need to support a 200 MHz refclk,
	 * this needs revisiting, because n may no longer be 1 then.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
  883. bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
  884. struct dpll *best_clock)
  885. {
  886. int refclk = 100000;
  887. const struct intel_limit *limit = &intel_limits_bxt;
  888. return chv_find_best_dpll(limit, crtc_state,
  889. target_clock, refclk, NULL, best_clock);
  890. }
  891. bool intel_crtc_active(struct intel_crtc *crtc)
  892. {
  893. /* Be paranoid as we can arrive here with only partial
  894. * state retrieved from the hardware during setup.
  895. *
  896. * We can ditch the adjusted_mode.crtc_clock check as soon
  897. * as Haswell has gained clock readout/fastboot support.
  898. *
  899. * We can ditch the crtc->primary->fb check as soon as we can
  900. * properly reconstruct framebuffers.
  901. *
  902. * FIXME: The intel_crtc->active here should be switched to
  903. * crtc->state->active once we have proper CRTC states wired up
  904. * for atomic.
  905. */
  906. return crtc->active && crtc->base.primary->state->fb &&
  907. crtc->config->base.adjusted_mode.crtc_clock;
  908. }
  909. enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
  910. enum pipe pipe)
  911. {
  912. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  913. return crtc->config->cpu_transcoder;
  914. }
static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev_priv))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN5(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
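
/*
 * The panel power sequencer write-protects various pipe/PLL registers while
 * the panel is powered up and the registers haven't been unlocked. Work out
 * which pipe the panel is attached to on this platform and warn if that
 * pipe's registers are still locked.
 */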
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL(0);
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
			"cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
				"plane %c assertion failure, should be disabled but not\n",
				plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
				"plane %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int sprite;

	if (INTEL_GEN(dev_priv) >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
					"plane %d assertion failure, should be off on pipe %c but is still active\n",
					sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
					"sprite %c assertion failure, should be off on pipe %c but is still active\n",
					sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_GEN(dev_priv) >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_GEN(dev_priv) >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	}
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}
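
/*
 * The helpers below decode a port control register value and report whether
 * the port is enabled and currently driving the given pipe. The pipe (or
 * transcoder) select encoding differs per platform (CPT keeps the DP
 * transcoder select in a separate register, CHV has its own pipe select
 * field), hence the per-platform branches.
 */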
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
			"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0 &&
			(val & DP_PIPEB_SELECT),
			"IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
			"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0 &&
			(val & SDVO_PIPE_B_SELECT),
			"IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
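
/*
 * Count the CRTCs that are currently active and driving a DVO output. Used
 * by the i830 DVO 2x clock workaround below, which has to be applied to (and
 * removed from) both PLLs together.
 */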
static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		count += crtc->base.state->active &&
			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
	}

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose PLL to disable
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
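
/*
 * Wait for the DPIO PHY status register to report the given port as ready,
 * warning with the observed vs. expected mask if it never does.
 */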
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
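
/*
 * Enable the PCH transcoder feeding the given pipe. The PCH DPLL and both
 * FDI sides must already be running; on IBX the BPC and interlace settings
 * are also copied over from the CPU pipe's PIPECONF so the two stay
 * consistent.
 */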
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	WARN_ON(!crtc->config->has_pch_encoder);

	if (HAS_PCH_LPT(dev_priv))
		return TRANSCODER_A;
	else
		return (enum transcoder) crtc->pipe;
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  (enum pipe) intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
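
/* Size of one hardware tile in bytes: 2 KiB on gen2, 4 KiB on everything newer. */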
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN2(dev_priv) ? 2048 : 4096;
}

static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
					   uint64_t fb_modifier, unsigned int cpp)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb_modifier);
		return cpp;
	}
}

unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
			       uint64_t fb_modifier, unsigned int cpp)
{
	if (fb_modifier == DRM_FORMAT_MOD_NONE)
		return 1;
	else
		return intel_tile_size(dev_priv) /
			intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
}

/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_i915_private *dev_priv,
			    unsigned int *tile_width,
			    unsigned int *tile_height,
			    uint64_t fb_modifier,
			    unsigned int cpp)
{
	unsigned int tile_width_bytes =
		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
}

unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
		      uint32_t pixel_format, uint64_t fb_modifier)
{
	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);

	return ALIGN(height, tile_height);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size;
}

static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	if (drm_rotation_90_or_270(rotation)) {
		*view = i915_ggtt_view_rotated;
		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
	} else {
		*view = i915_ggtt_view_normal;
	}
}

static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return 256 * 1024;
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_INFO(dev_priv)->gen >= 4)
		return 4 * 1024;
	else
		return 0;
}

static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
					 uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev_priv)->gen >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb_modifier);
		return 0;
	}
}
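
/*
 * Pin a framebuffer's backing object into the global GTT in the view (normal
 * or rotated) that scanout will use, and opportunistically install a fence
 * register for tiled scan-out. Returns the pinned vma or an ERR_PTR.
 */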
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
	if (IS_ERR(vma))
		goto err;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		if (i915_vma_get_fence(vma) == 0)
			i915_vma_pin_fence(vma);
	}

err:
	intel_runtime_pm_put(dev_priv);
	return vma;
}

void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	intel_fill_fb_ggtt_view(&view, fb, rotation);
	vma = i915_gem_object_to_ggtt(obj, &view);

	i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
}

static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
			  unsigned int rotation)
{
	if (drm_rotation_90_or_270(rotation))
		return to_intel_framebuffer(fb)->rotated[plane].pitch;
	else
		return fb->pitches[plane];
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int plane)
{
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	unsigned int pitch = fb->pitches[plane];

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int plane)
{
	const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
	unsigned int rotation = state->base.rotation;

	if (drm_rotation_90_or_270(rotation)) {
		*x += intel_fb->rotated[plane].x;
		*y += intel_fb->rotated[plane].y;
	} else {
		*x += intel_fb->normal[plane].x;
		*y += intel_fb->normal[plane].y;
	}
}

/*
 * Input tile dimensions and pitch must already be
 * rotated to match x and y, and in pixel units.
 */
static u32 _intel_adjust_tile_offset(int *x, int *y,
				     unsigned int tile_width,
				     unsigned int tile_height,
				     unsigned int tile_size,
				     unsigned int pitch_tiles,
				     u32 old_offset,
				     u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}

/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    const struct intel_plane_state *state, int plane,
				    u32 old_offset, u32 new_offset)
{
	const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	unsigned int rotation = state->base.rotation;
	unsigned int pitch = intel_fb_pitch(fb, plane, rotation);

	WARN_ON(new_offset > old_offset);

	if (fb->modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(dev_priv, &tile_width, &tile_height,
				fb->modifier, cpp);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		_intel_adjust_tile_offset(x, y, tile_width, tile_height,
					  tile_size, pitch_tiles,
					  old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}

/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
				      int *x, int *y,
				      const struct drm_framebuffer *fb, int plane,
				      unsigned int pitch,
				      unsigned int rotation,
				      u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	u32 offset, offset_aligned;

	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(dev_priv, &tile_width, &tile_height,
				fb_modifier, cpp);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		_intel_adjust_tile_offset(x, y, tile_width, tile_height,
					  tile_size, pitch_tiles,
					  offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
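
/*
 * Worked example for _intel_compute_tile_offset() above (hypothetical
 * numbers, purely for illustration): an X-tiled fb on gen4+ with cpp=4 has
 * 128x8 pixel tiles of 4096 bytes (512-byte tile rows). With an 8192-byte
 * pitch, pitch_tiles = 8192 / 512 = 16, and for x=300, y=20:
 *
 *   tile_rows = 20 / 8    = 2, y -> 20 % 8    = 4
 *   tiles     = 300 / 128 = 2, x -> 300 % 128 = 44
 *   offset    = (2 * 16 + 2) * 4096 = 0x22000
 *
 * The returned offset is tile aligned, and (x, y) = (44, 4) now addresses
 * the first pixel relative to the base tile.
 */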
u32 intel_compute_tile_offset(int *x, int *y,
			      const struct intel_plane_state *state,
			      int plane)
{
	const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int rotation = state->base.rotation;
	int pitch = intel_fb_pitch(fb, plane, rotation);
	u32 alignment;

	/* AUX_DIST needs only 4K alignment */
	if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
		alignment = 4096;
	else
		alignment = intel_surf_alignment(dev_priv, fb->modifier);

	return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
					  rotation, alignment);
}

/* Convert the fb->offset[] linear offset into x/y offsets */
static void intel_fb_offset_to_xy(int *x, int *y,
				  const struct drm_framebuffer *fb, int plane)
{
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	unsigned int pitch = fb->pitches[plane];
	u32 linear_offset = fb->offsets[plane];

	*y = linear_offset / pitch;
	*x = linear_offset % pitch / cpp;
}

static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}
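
/*
 * Pre-compute the per-plane scanout offsets for both the normal and the
 * 90/270 rotated GTT views of a framebuffer, and verify that the fb layout
 * (offsets, pitches) actually fits within the backing object.
 */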
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	uint32_t format = fb->pixel_format;
	int i, num_planes = drm_format_num_planes(format);
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;

		cpp = drm_format_plane_cpp(format, i);
		width = drm_format_plane_width(fb->width, format, i);
		height = drm_format_plane_height(fb->height, format, i);

		intel_fb_offset_to_xy(&x, &y, fb, i);

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i915_gem_object_is_tiled(intel_fb->obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
				  i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = _intel_compute_tile_offset(dev_priv, &x, &y,
						    fb, 0, fb->pitches[i],
						    DRM_ROTATE_0, tile_size);
		offset /= tile_size;

		if (fb->modifier != DRM_FORMAT_MOD_NONE) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(dev_priv, &tile_width, &tile_height,
					fb->modifier, cpp);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			_intel_adjust_tile_offset(&x, &y,
						  tile_width, tile_height,
						  tile_size, pitch_tiles,
						  gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
		DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
			  max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
		return -EINVAL;
	}

	return 0;
}
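
/*
 * Translate a pre-SKL primary plane DISPPLANE_* pixel format into a DRM
 * fourcc, defaulting to XRGB8888 for anything unrecognized.
 */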
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	if (plane_config->tiling == I915_TILING_X)
		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
	if (plane->fb == plane->state->fb)
		return;

	if (plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);
	plane->state->fb = plane->fb;
	if (plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);
}

static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->base.visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary_noatomic(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	obj = intel_fb_obj(fb);
	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
			       unsigned int rotation)
{
	int cpp = drm_format_plane_cpp(fb->pixel_format, plane);

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		switch (cpp) {
		case 8:
			return 4096;
		case 4:
		case 2:
		case 1:
			return 8192;
		default:
			MISSING_CASE(cpp);
			break;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 8:
			return 2048;
		case 4:
			return 4096;
		case 2:
		case 1:
			return 8192;
		default:
			MISSING_CASE(cpp);
			break;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
	}
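	/* unknown modifier/cpp: fall back to the most restrictive limit */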
	return 2048;
}
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->aux.offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_compute_tile_offset(&x, &y, plane_state, 0);

	alignment = intel_surf_alignment(dev_priv, fb->modifier);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
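	/*
	 * Example: with a 4 KiB alignment, aux_offset == 0x11234 is
	 * rounded down to 0x11000 (0x11234 & ~0xfff) before being used
	 * as the upper bound for the main surface offset.
	 */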
	if (offset > aux_offset)
		offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
						  offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceeds the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested.
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		while ((x + w) * cpp > fb->pitches[0]) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
				return -EINVAL;
			}

			offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
							  offset, offset - alignment);
		}
	}

	plane_state->main.offset = offset;
	plane_state->main.x = x;
	plane_state->main.y = y;

	return 0;
}
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
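	/*
	 * src coordinates are 16.16 fixed point; shifting by 17 both
	 * drops the fractional part and halves the value, yielding the
	 * 2x2-subsampled CbCr plane coordinates for NV12.
	 */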
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_compute_tile_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->aux.offset = offset;
	plane_state->aux.x = x;
	plane_state->aux.y = y;

	return 0;
}
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_ROTATE_270);
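	/*
	 * Note the fixed DRM_ROTATE_270: the rotated GTT view uses a
	 * single layout, so the src rect is transformed the same way
	 * for both 90° and 270° scanout rotations.
	 */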
	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (fb->pixel_format == DRM_FORMAT_NV12) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
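		/*
		 * Formats without an AUX surface get a maximal,
		 * page-aligned sentinel offset (~0xfff == 0xfffff000)
		 * so the "offset > aux_offset" clamp in
		 * skl_check_main_surface() never kicks in.
		 */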
		plane_state->aux.offset = ~0xfff;
		plane_state->aux.x = 0;
		plane_state->aux.y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(primary->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	if (IS_G4X(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	intel_add_fb_offsets(&x, &y, plane_state, 0);

	if (INTEL_GEN(dev_priv) >= 4)
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(&x, &y, plane_state, 0);
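	/*
	 * With 180° rotation (or X mirroring) the hardware scans out
	 * backwards from the programmed offset, so point x/y at the
	 * opposite corner (or edge) of the source rectangle.
	 */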
	if (rotation & DRM_ROTATE_180) {
		x += crtc_state->pipe_src_w - 1;
		y += crtc_state->pipe_src_h - 1;
	} else if (rotation & DRM_REFLECT_X) {
		x += crtc_state->pipe_src_w - 1;
	}

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	if (INTEL_GEN(dev_priv) < 4)
		intel_crtc->dspaddr_offset = linear_offset;

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DSPSURF(plane),
			   intel_fb_gtt_offset(fb, rotation) +
			   intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else {
		I915_WRITE(DSPADDR(plane),
			   intel_fb_gtt_offset(fb, rotation) +
			   intel_crtc->dspaddr_offset);
	}
	POSTING_READ(reg);
}
static void i9xx_disable_primary_plane(struct drm_plane *primary,
				       struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;

	I915_WRITE(DSPCNTR(plane), 0);
	if (INTEL_INFO(dev_priv)->gen >= 4)
		I915_WRITE(DSPSURF(plane), 0);
	else
		I915_WRITE(DSPADDR(plane), 0);
	POSTING_READ(DSPCNTR(plane));
}
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	intel_add_fb_offsets(&x, &y, plane_state, 0);

	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(&x, &y, plane_state, 0);

	/* HSW+ does this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
	    rotation & DRM_ROTATE_180) {
		x += crtc_state->pipe_src_w - 1;
		y += crtc_state->pipe_src_h - 1;
	}

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   intel_fb_gtt_offset(fb, rotation) +
		   intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
			      uint64_t fb_modifier, uint32_t pixel_format)
{
	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
		return 64;
	} else {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
	}
}
u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
			unsigned int rotation)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	vma = i915_gem_object_to_ggtt(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		 view.type))
		return -1;

	return i915_ggtt_offset(vma);
}
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}
/*
 * This function detaches (i.e. unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
	struct intel_crtc_scaler_state *scaler_state;
	int i;

	scaler_state = &intel_crtc->config->scaler_state;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
		     unsigned int rotation)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 stride = intel_fb_pitch(fb, plane, rotation);

	/*
	 * The stride is expressed either as a multiple of 64-byte chunks
	 * for linear buffers, or as a number of tiles for tiled buffers.
	 */
	if (drm_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, plane);

		stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
	} else {
		stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
						    fb->pixel_format);
	}
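	/*
	 * Example: an unrotated X-tiled fb with a 4096-byte pitch is
	 * programmed with stride 8, since an X tile row is 512 bytes.
	 */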
	return stride;
}
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case DRM_ROTATE_0:
		break;
	/*
	 * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
	 * while i915 HW rotation is clockwise, hence the swap below.
	 */
	case DRM_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl;
	unsigned int rotation = plane_state->base.rotation;
	u32 stride = skl_plane_stride(fb, 0, rotation);
	u32 surf_addr = plane_state->main.offset;
	int scaler_id = plane_state->scaler_id;
	int src_x = plane_state->main.x;
	int src_y = plane_state->main.y;
	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
	int src_h = drm_rect_height(&plane_state->base.src) >> 16;
	int dst_x = plane_state->base.dst.x1;
	int dst_y = plane_state->base.dst.y1;
	int dst_w = drm_rect_width(&plane_state->base.dst);
	int dst_h = drm_rect_height(&plane_state->base.dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	/* Sizes are 0 based */
	src_w--;
	src_h--;
	dst_w--;
	dst_h--;
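	/* e.g. a 1920x1080 rect is programmed as 1919x1079 */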
	intel_crtc->dspaddr_offset = surf_addr;

	intel_crtc->adjusted_x = src_x;
	intel_crtc->adjusted_y = src_y;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
	I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	I915_WRITE(PLANE_SURF(pipe, 0),
		   intel_fb_gtt_offset(fb, rotation) + surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
static void skylake_disable_primary_plane(struct drm_plane *primary,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	I915_WRITE(PLANE_CTL(pipe, 0), 0);
	I915_WRITE(PLANE_SURF(pipe, 0), 0);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}
static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			plane->update_plane(&plane->base,
					    to_intel_crtc_state(crtc->state),
					    plane_state);
	}
}
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_commit(state);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
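	/*
	 * On gen2-4 (except g4x) a full GPU reset takes the display
	 * engine down with it; on newer parts the display survives.
	 */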
	return intel_has_gpu_reset(dev_priv) &&
	       INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}

	/* reset doesn't touch the display, but flips might get nuked anyway */
	if (!i915.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		state = NULL;
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		goto err;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
		goto err;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
	return;

err:
	/* state is NULL when duplicating it failed; don't deref it then */
	if (state)
		drm_atomic_state_put(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	int ret;

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev_priv);

	dev_priv->modeset_restore_state = NULL;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		if (!state) {
			/*
			 * Flips in the rings have been nuked by the reset,
			 * so update the base address of all primary
			 * planes to the last fb to make sure we're
			 * showing the correct fb after a reset.
			 *
			 * FIXME: Atomic will make this obsolete since we won't schedule
			 * CS-based flips (which might get lost in gpu resets) any more.
			 */
			intel_update_primary_planes(dev);
		} else {
			ret = __intel_display_resume(dev, state);
			if (ret)
				DRM_ERROR("Restoring old state failed with %i\n", ret);
		}
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	if (state)
		drm_atomic_state_put(state);
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);
}
static bool abort_flip_on_reset(struct intel_crtc *crtc)
{
	struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;

	if (i915_reset_in_progress(error))
		return true;

	if (crtc->reset_count != i915_reset_count(error))
		return true;

	return false;
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (abort_flip_on_reset(intel_crtc))
		return false;

	spin_lock_irq(&dev->event_lock);
	pending = to_intel_crtc(crtc)->flip_work != NULL;
	spin_unlock_irq(&dev->event_lock);

	return pending;
}
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
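	/* poll for bit lock; writing a set IIR status bit back clears it */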
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
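	/* copy the pipe bpc (PIPECONF bits 7:5) into FDI RX bits 18:16 */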
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->flip_work)
			intel_wait_for_vblank(dev_priv, crtc->pipe);

		return true;
	}

	return false;
}
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->unpin_work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, it is necessary to divide one by the other, so we
	 * convert the virtual clock precision to kHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}
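	/*
	 * Worked example: for a 108000 kHz clock and auxdiv == 0,
	 * desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000) = 1600,
	 * so divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0;
	 * divsel fits in 7 bits, so the loop stops on the first pass.
	 */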
	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock,
		      auxdiv,
		      divsel,
		      phasedir,
		      phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);
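	/* invert the divisor encoding done in lpt_program_iclkip() */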
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	return -1;
}
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(intel_crtc->config)) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n",
				  pipe_name(pipe));
	}
}
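
/*
 * The check above works because PIPEDSL exposes the pipe's current
 * scanline: sample it, wait, and re-read. On a running pipe the
 * counter keeps advancing, so a value that is still unchanged after
 * the retries means scanout never started and the modeset failed.
 */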

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	need_scaling = drm_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h) :
		(src_w != dst_w || src_h != dst_h);

	/*
	 * If the plane is being disabled, the scaler is no longer required,
	 * or a detach is being forced:
	 * - free the scaler bound to this plane/crtc
	 * - to do this, update crtc_state->scaler_state
	 *
	 * Here the scaler state in crtc_state is marked free so that the
	 * scaler can be assigned to another user. The actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	    dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
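
/*
 * A worked example of the rotation-aware check above: fitting a
 * 1920x1080 source into a 1080x1920 destination with a 90 degree
 * rotation needs no scaler, since after rotation src_h (1080) matches
 * dst_w (1080) and src_w (1920) matches dst_h (1920); the same
 * rectangles without rotation would require one.
 */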

/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's scaler state
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, DRM_ROTATE_0,
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;
	bool force_detach = !fb || !plane_state->base.visible;

	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->base.src) >> 16,
				drm_rect_height(&plane_state->base.src) >> 16,
				drm_rect_width(&plane_state->base.dst),
				drm_rect_height(&plane_state->base.dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}
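
/*
 * Note on the >> 16 shifts passed to skl_update_scaler() above: atomic
 * plane source rectangles (plane_state->base.src) are specified in
 * 16.16 fixed point, so shifting right by 16 recovers whole pixels,
 * e.g. a src width of 1920 << 16 becomes 1920. Destination rectangles
 * are already in integer pixels and need no conversion.
 */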

static void skylake_scaler_disable(struct intel_crtc *crtc)
{
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}

static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}

void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a
	 * vblank. This function is called from post_plane_update, which is
	 * run after a vblank wait.
	 */
	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "it's not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = to_i915(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS. Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}

/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}

/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_pre_disable_primary(crtc);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev_priv, pipe);
	}
}

static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_post_update(crtc);

		if (primary_state->base.visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->base.visible))
			intel_post_enable_primary(&crtc->base);
	}
}

static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_pre_update(crtc, pipe_config, primary_state);

		if (old_primary_state->base.visible &&
		    (modeset || !primary_state->base.visible))
			intel_pre_disable_primary(&crtc->base);
	}

	if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
		crtc->wm.cxsr_allowed = false;

		/*
		 * Vblank time updates from the shadow to live plane control register
		 * are blocked if the memory self-refresh mode is active at that
		 * moment. So to make sure the plane gets truly disabled, disable
		 * first the self-refresh mode. The self-refresh enable bit in turn
		 * will be checked/applied by the HW only at the next frame start
		 * event which is after the vblank start event, so we need to have a
		 * wait-for-vblank between disabling the plane and the pipe.
		 */
		if (old_crtc_state->base.active) {
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->wm.vlv.cxsr = false;
			intel_wait_for_vblank(dev_priv, crtc->pipe);
		}
	}

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm) {
		ilk_disable_lp_wm(dev);
		intel_wait_for_vblank(dev_priv, crtc->pipe);
	}

	/*
	 * If we're doing a modeset, we're done. No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks. For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}

static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
}

static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
					  struct intel_crtc_state *crtc_state,
					  struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
		struct drm_connector_state *conn_state = conn->state;
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct drm_crtc *crtc,
				      struct intel_crtc_state *crtc_state,
				      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
		struct drm_connector_state *conn_state = conn->state;
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct drm_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
		struct drm_connector_state *conn_state = conn->state;
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		encoder->enable(encoder, crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct drm_crtc *crtc,
				   struct intel_crtc_state *old_crtc_state,
				   struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct drm_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
					    struct intel_crtc_state *old_crtc_state,
					    struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
	}
}
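
/*
 * Taken together, the helpers above give encoders a fixed set of hook
 * points around a modeset: pre_pll_enable -> pre_enable -> enable on
 * the way up, and disable -> post_disable -> post_pll_disable on the
 * way down. Only enable and disable are mandatory; the remaining hooks
 * are invoked only when the encoder implements them.
 */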

static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev_priv, pipe);
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}

static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(crtc);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(crtc);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}

static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(intel_crtc);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_pipe_clock(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
}

static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}

enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev_priv));
		/* fall through */
	case INTEL_OUTPUT_DP:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what the status of the given connectors is, play it safe
		 * and run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev_priv));
		/* fall through */
	case INTEL_OUTPUT_DP:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}

static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
					    struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT(intel_display_port_power_domain(intel_encoder));
	}

	if (crtc_state->shared_dpll)
		mask |= BIT(POWER_DOMAIN_PLLS);

	return mask;
}

static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc, crtc_state);

	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}
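
/*
 * The set arithmetic in modeset_get_crtc_power_domains() is worth
 * spelling out: references are taken on the newly needed domains
 * (new & ~old) before the hardware is touched, and the domains that
 * are no longer needed (old & ~new) are returned so the caller can
 * drop them with modeset_put_power_domains() once the modeset has
 * completed and the hardware no longer depends on them.
 */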

static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}

static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
	int max_cdclk_freq = dev_priv->max_cdclk_freq;

	if (INTEL_INFO(dev_priv)->gen >= 9 ||
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return max_cdclk_freq;
	else if (IS_CHERRYVIEW(dev_priv))
		return max_cdclk_freq * 95 / 100;
	else if (INTEL_INFO(dev_priv)->gen < 4)
		return 2 * max_cdclk_freq * 90 / 100;
	else
		return max_cdclk_freq * 90 / 100;
}
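
/*
 * Example of the guardband math above: Cherryview with its 320 MHz max
 * cdclk is capped at 320000 * 95 / 100 = 304000 kHz of dotclock, while
 * a pre-gen4 part with, say, a 320 MHz cdclk could drive up to
 * 2 * 320000 * 90 / 100 = 576000 kHz thanks to the doubled pixel rate
 * of double-wide mode.
 */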

static int skl_calc_cdclk(int max_pixclk, int vco);

static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
		int max_cdclk, vco;

		vco = dev_priv->skl_preferred_vco_freq;
		WARN_ON(vco != 8100000 && vco != 8640000);

		/*
		 * Use the lower (vco 8640) cdclk values as a
		 * first guess. skl_calc_cdclk() will correct it
		 * if the preferred vco is 8100 instead.
		 */
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			max_cdclk = 617143;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			max_cdclk = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			max_cdclk = 432000;
		else
			max_cdclk = 308571;

		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (IS_BROADWELL(dev_priv)) {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 MHz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev_priv))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev_priv))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}

static void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
				 dev_priv->cdclk_pll.ref);
	else
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
				 dev_priv->cdclk_freq);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}

/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
static int skl_cdclk_decimal(int cdclk)
{
	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
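
/*
 * Worked example of the encoding above: for cdclk = 337500 kHz the
 * field value is DIV_ROUND_CLOSEST(337500 - 1000, 500) = 673, i.e.
 * 336.5 MHz in .1 binary fixed point, which the hardware reads back
 * with the +1 MHz offset as the intended 337.5 MHz.
 */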

static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

	if (cdclk == dev_priv->cdclk_pll.ref)
		return 0;

	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		/* fall through */
	case 144000:
	case 288000:
	case 384000:
	case 576000:
		ratio = 60;
		break;
	case 624000:
		ratio = 65;
		break;
	}

	return dev_priv->cdclk_pll.ref * ratio;
}
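
/*
 * To make the ratios above concrete (assuming the 19.2 MHz reference
 * clock reported in cdclk_pll.ref on BXT): ratio 60 yields a
 * 19200 * 60 = 1152000 kHz VCO, from which 576000/288000/144000 kHz
 * follow as vco / 2 / {1,2,4} and 384000 kHz as vco / 2 / 1.5; ratio
 * 65 yields 1248000 kHz, and 1248000 / 2 = 624000 kHz.
 */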

static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
				    1))
		DRM_ERROR("timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk_pll.vco = 0;
}

static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
	u32 val;

	val = I915_READ(BXT_DE_PLL_CTL);
	val &= ~BXT_DE_PLL_RATIO_MASK;
	val |= BXT_DE_PLL_RATIO(ratio);
	I915_WRITE(BXT_DE_PLL_CTL, val);

	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE,
				    BXT_DE_PLL_LOCK,
				    BXT_DE_PLL_LOCK,
				    1))
		DRM_ERROR("timeout waiting for DE PLL lock\n");

	dev_priv->cdclk_pll.vco = vco;
}

static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
{
	u32 val, divider;
	int vco, ret;

	vco = bxt_de_pll_vco(dev_priv, cdclk);

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	case 8:
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
		break;
	case 4:
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
		break;
	case 3:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
		break;
	case 2:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	default:
		WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
		WARN_ON(vco != 0);

		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	}

	/* Inform power controller of upcoming frequency change */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      0x80000000);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_enable(dev_priv, vco);

	val = divider | skl_cdclk_decimal(cdclk);
	/*
	 * FIXME if only the cd2x divider needs changing, it could be done
	 * without shutting off the pipe (if only one pipe is active).
	 */
	val |= BXT_CDCLK_CD2X_PIPE_NONE;
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (cdclk >= 500000)
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	I915_WRITE(CDCLK_CTL, val);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      DIV_ROUND_UP(cdclk, 25000));
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	intel_update_cdclk(dev_priv);
}
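
/*
 * Note the pcode handshake bracketing the frequency change above: the
 * 0x80000000 write warns the power controller that a change is coming,
 * and the final write reports the new frequency in 25 MHz units, e.g.
 * DIV_ROUND_UP(576000, 25000) = 24 for a 576 MHz cdclk.
 */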

static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;

	intel_update_cdclk(dev_priv);

	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Some BIOS versions leave an incorrect decimal frequency value and
	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
	 * so sanitize this register.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	/*
	 * Let's ignore the pipe field, since BIOS could have configured the
	 * dividers both synching to an active pipe, or asynchronously
	 * (PIPE_NONE).
	 */
	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;

	expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
		   skl_cdclk_decimal(dev_priv->cdclk_freq);
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (dev_priv->cdclk_freq >= 500000)
		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;

	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;

	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}
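
/*
 * The two sentinel values above are what force reprogramming: a cdclk
 * frequency of 0 can never match a computed target, and a vco of -1
 * matches neither 0 nor any real target vco, so the next
 * bxt_set_cdclk() call performs a full PLL disable + enable rather
 * than trusting the state left behind by the BIOS.
 */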

void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
		return;

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 */
	bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
}

void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
}

static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk > 540000)
			return 617143;
		else if (max_pixclk > 432000)
			return 540000;
		else if (max_pixclk > 308571)
			return 432000;
		else
			return 308571;
	} else {
		if (max_pixclk > 540000)
			return 675000;
		else if (max_pixclk > 450000)
			return 540000;
		else if (max_pixclk > 337500)
			return 450000;
		else
			return 337500;
	}
}
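
/*
 * The cdclk steps above are integer divisions of the two legal DPLL0
 * VCOs: 8640000 / {14, 16, 20, 28} gives 617143, 540000, 432000 and
 * 308571 kHz, while 8100000 / {12, 15, 18, 24} gives 675000, 540000,
 * 450000 and 337500 kHz.
 */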

static void
skl_dpll0_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 24000;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(LCPLL1_CTL);
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
		return;

	val = I915_READ(DPLL_CTRL1);

	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
			    DPLL_CTRL1_SSC(SKL_DPLL0) |
			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}

void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
{
	bool changed = dev_priv->skl_preferred_vco_freq != vco;

	dev_priv->skl_preferred_vco_freq = vco;

	if (changed)
		intel_update_max_cdclk(dev_priv);
}

static void
skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	int min_cdclk = skl_calc_cdclk(0, vco);
	u32 val;

	WARN_ON(vco != 8100000 && vco != 8640000);

	/* select the minimum CDCLK before enabling DPLL 0 */
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
	I915_WRITE(CDCLK_CTL, val);
	POSTING_READ(CDCLK_CTL);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with vco.
	 */
	val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	if (vco == 8640000)
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					    SKL_DPLL0);
	else
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					    SKL_DPLL0);
	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);

	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);

	if (intel_wait_for_register(dev_priv,
				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("DPLL0 not locked\n");

	dev_priv->cdclk_pll.vco = vco;

	/* We'll want to keep using the current vco from now on. */
	skl_set_preferred_cdclk_vco(dev_priv, vco);
}
  5277. static void
  5278. skl_dpll0_disable(struct drm_i915_private *dev_priv)
  5279. {
  5280. I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
  5281. if (intel_wait_for_register(dev_priv,
  5282. LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
  5283. 1))
  5284. DRM_ERROR("Couldn't disable DPLL0\n");
  5285. dev_priv->cdclk_pll.vco = 0;
  5286. }
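
/*
 * Changing CDCLK on SKL is a three step handshake with the PCU firmware:
 * ask pcode to prepare for the change, reprogram CDCLK_CTL (switching
 * DPLL0 to the other VCO first if necessary), and finally tell pcode
 * which frequency band was selected so it can adjust voltages to match.
 */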

static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
	u32 freq_select, pcu_ack;
	int ret;

	WARN_ON((cdclk == 24000) != (vco == 0));

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
			  ret);
		return;
	}

	/* set CDCLK_CTL */
	switch (cdclk) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308571:
	case 337500:
	default:
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617143:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_enable(dev_priv, vco);

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev_priv);
}

static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);

void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}

void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	int cdclk, vco;

	skl_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
		/*
		 * Use the current vco as our initial
		 * guess as to what the preferred vco is.
		 */
		if (dev_priv->skl_preferred_vco_freq == 0)
			skl_set_preferred_cdclk_vco(dev_priv,
						    dev_priv->cdclk_pll.vco);
		return;
	}

	vco = dev_priv->skl_preferred_vco_freq;
	if (vco == 0)
		vco = 8100000;

	cdclk = skl_calc_cdclk(0, vco);

	skl_set_cdclk(dev_priv, cdclk, vco);
}

static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t cdctl, expected;

	/*
	 * Check if the pre-OS initialized the display.
	 * The pre-OS sets the SWF18 scratchpad register, which the OS
	 * drivers can read to check that status.
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	intel_update_cdclk(dev_priv);
	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * the decimal part is programmed wrong by a BIOS whose pre-OS does
	 * not enable the display. Verify that as well.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
		skl_cdclk_decimal(dev_priv->cdclk_freq);
	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;
	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}
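
/*
 * On VLV/CHV the display clock is produced by the CCK as roughly
 * 2 * HPLL / (divider + 1), so the divider for a wanted cdclk is
 * DIV_ROUND_CLOSEST(2 * hpll_freq, cdclk) - 1. E.g. assuming an
 * 800 MHz HPLL, a 400 MHz cdclk needs divider (1600000/400000) - 1 = 3.
 */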

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
					!= dev_priv->cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev_priv);
}

static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
						!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev_priv);
}

static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz (VLV only)
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
	 * of the lower bin and adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (!IS_CHERRYVIEW(dev_priv) &&
	    max_pixclk > freq_320*limit/100)
		return 400000;
	else if (max_pixclk > 266667*limit/100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}
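
/*
 * Worked example for valleyview_calc_cdclk(): on VLV (limit = 90) with
 * a 320 MHz bin, a max_pixclk of 280000 kHz exceeds 266667*90/100 =
 * 240000 but not 320000*90/100 = 288000, so the 320 MHz bin is chosen;
 * at 300000 kHz we would already have to bump to 400 MHz.
 */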

static int bxt_calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 576000)
		return 624000;
	else if (max_pixclk > 384000)
		return 576000;
	else if (max_pixclk > 288000)
		return 384000;
	else if (max_pixclk > 144000)
		return 288000;
	else
		return 144000;
}
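
/*
 * The BXT cdclk values above all derive from the 19.2 MHz reference:
 * the DE PLL runs at 19200 * ratio kHz and CDCLK is that VCO divided by
 * the CD2X divider (see bxt_de_pll_update() and
 * broxton_get_display_clock_speed() below). E.g. a ratio of 60 gives a
 * 1152000 kHz VCO, and dividing by 2/3/4/8 yields 576000, 384000,
 * 288000 and 144000; 624000 needs the higher 1248000 kHz VCO (ratio 65)
 * divided by 2.
 */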

/* Compute the max pixel clock for new configuration. */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned max_pixclk = 0, i;
	enum pipe pipe;

	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int pixclk = 0;

		if (crtc_state->enable)
			pixclk = crtc_state->adjusted_mode.crtc_clock;

		intel_state->min_pixclk[i] = pixclk;
	}

	for_each_pipe(dev_priv, pipe)
		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);

	return max_pixclk;
}

static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int max_pixclk = intel_mode_max_pixclk(dev, state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);

	return 0;
}

static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	int max_pixclk = ilk_max_pixel_rate(state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		bxt_calc_cdclk(max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = bxt_calc_cdclk(0);

	return 0;
}

static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}

static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
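
/*
 * valleyview_crtc_enable() below follows the usual modeset ordering:
 * pipe timings and sizes first, the PLL prepared and enabled between
 * the encoders' pre_pll_enable and pre_enable hooks, and the pipe
 * itself (followed by vblank and the encoders proper) only once
 * watermarks and LUTs are in place.
 */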

static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, intel_crtc->config);
		chv_enable_pll(intel_crtc, intel_crtc->config);
	} else {
		vlv_prepare_pll(intel_crtc, intel_crtc->config);
		vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(intel_crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}

static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}

static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(intel_crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}

static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}

static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	if (to_intel_plane_state(crtc->primary->state)->base.visible) {
		WARN_ON(intel_crtc->flip_work);

		intel_pre_disable_primary_noatomic(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->base.visible = false;
	}

	state = drm_atomic_state_alloc(crtc->dev);
	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}

/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_verify_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
				"connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
				"connector is active, but attached crtc isn't\n");

		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc && crtc->state->active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
				"best encoder set without crtc!\n");
	}
}

int intel_connector_init(struct intel_connector *connector)
{
	drm_atomic_helper_connector_reset(&connector->base);

	if (!connector->base.state)
		return -ENOMEM;

	return 0;
}

struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}

#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
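
/*
 * Worked example for the FDI sizing above, assuming a 2.7 GHz link
 * (link_bw = 270000 kHz of symbols after 8b/10b, i.e. 270000 * 8 =
 * 2160000 kbps of payload per lane): a 148500 kHz mode at 24 bpp needs
 * 148500 * 24 = 3564000 kbps, which rounds up to 2 FDI lanes (the lane
 * helper also pads the bandwidth slightly for spread spectrum). If the
 * lane check still fails, the retry loop above drops pipe_bpp in steps
 * of 6 and recomputes.
 */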

static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
				     struct intel_crtc_state *pipe_config)
{
	if (pipe_config->pipe_bpp > 24)
		return false;

	/* HSW can handle pixel rate up to cdclk? */
	if (IS_HASWELL(dev_priv))
		return true;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 * would save more power.
	 */
	return ilk_pipe_pixel_rate(pipe_config) <=
		dev_priv->max_cdclk_freq * 95 / 100;
}
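
/*
 * E.g. on BDW with a 540000 kHz max cdclk this allows IPS for pipe
 * pixel rates up to 540000 * 95 / 100 = 513000 kHz; anything faster
 * must run with IPS disabled.
 */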

static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	pipe_config->ips_enabled = i915.enable_ips &&
		hsw_crtc_supports_ips(crtc) &&
		pipe_config_supports_ips(dev_priv, pipe_config);
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return INTEL_INFO(dev_priv)->gen < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev_priv))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}

static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	u32 cdctl;

	skl_dpll0_update(dev_priv);

	if (dev_priv->cdclk_pll.vco == 0)
		return dev_priv->cdclk_pll.ref;

	cdctl = I915_READ(CDCLK_CTL);

	if (dev_priv->cdclk_pll.vco == 8640000) {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308571;
		case CDCLK_FREQ_540:
			return 540000;
		case CDCLK_FREQ_675_617:
			return 617143;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
		}
	} else {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_540:
			return 540000;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
		}
	}

	return dev_priv->cdclk_pll.ref;
}

static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 19200;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(BXT_DE_PLL_ENABLE);
	if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
		return;

	if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
		return;

	val = I915_READ(BXT_DE_PLL_CTL);
	dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
		dev_priv->cdclk_pll.ref;
}

static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	u32 divider;
	int div, vco;

	bxt_de_pll_update(dev_priv);

	vco = dev_priv->cdclk_pll.vco;
	if (vco == 0)
		return dev_priv->cdclk_pll.ref;

	divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;

	switch (divider) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		div = 2;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		div = 3;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		div = 4;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		div = 8;
		break;
	default:
		MISSING_CASE(divider);
		return dev_priv->cdclk_pll.ref;
	}

	return DIV_ROUND_CLOSEST(vco, div);
}
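
/*
 * Readback example for the above: with BXT_DE_PLL_RATIO reading 60 the
 * VCO is 60 * 19200 = 1152000 kHz, and a CD2X divider selection of 1.5
 * (div = 3) gives DIV_ROUND_CLOSEST(1152000, 3) = 384000 kHz.
 */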

static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		return 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		return 337500;
	else
		return 675000;
}

static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (IS_HSW_ULT(dev_priv))
		return 337500;
	else
		return 540000;
}

static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
				      CCK_DISPLAY_CLOCK_CONTROL);
}

static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	return 450000;
}

static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	return 333333;
}

static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	return 200000;
}

static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}

static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133333;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333333;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	return 266667;
}

static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (pdev->revision == 0x1)
		return 133333;

	pci_bus_read_config_word(pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	return 133333;
}

static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
{
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev_priv))
		vco_table = ctg_vco;
	else if (IS_G4X(dev_priv))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev_priv))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev_priv))
		vco_table = pnv_vco;
	else if (IS_G33(dev_priv))
		vco_table = blb_vco;
	else
		return 0;

	tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}
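
/*
 * The G4x/gen4 cdclk helpers below share a pattern: look up the HPLL
 * VCO strap via intel_hpll_vco(), read a divider select out of GCFGC,
 * and map or divide accordingly. E.g. for i965gm with a 3200000 kHz
 * VCO and cdclk_sel 0 the table gives divider 16, i.e.
 * DIV_ROUND_CLOSEST(3200000, 16) = 200000 kHz.
 */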

static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
	uint16_t tmp = 0;

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		return cdclk_sel ? 333333 : 222222;
	case 3200000:
		return cdclk_sel ? 320000 : 228571;
	default:
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
		return 222222;
	}
}

static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	static const uint8_t div_3200[] = { 16, 10,  8 };
	static const uint8_t div_4000[] = { 20, 12, 10 };
	static const uint8_t div_5333[] = { 24, 16, 14 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
	uint16_t tmp = 0;

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
	return 200000;
}

static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
	uint16_t tmp = 0;

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 4) & 0x7;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 4800000:
		div_table = div_4800;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
	return 190476;
}

static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}
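
/*
 * Worked example, assuming a 148500 kHz mode at 24 bpp on a 4-lane
 * 162000 kHz (1.62 GHz) DP link: the data ratio is
 * 148500 * 24 : 162000 * 4 * 8 = 3564000 : 5184000 = 0.6875. With N
 * clamped to DATA_LINK_N_MAX = 2^23 = 8388608, compute_m_n() yields
 * gmch_m = 5767168 and gmch_n = 8388608, encoding the ratio exactly
 * (5767168 / 8388608 = 0.6875).
 */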

static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915.panel_use_ssc >= 0)
		return i915.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev_priv)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
  6509. static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
  6510. struct intel_link_m_n *m_n)
  6511. {
  6512. struct drm_device *dev = crtc->base.dev;
  6513. struct drm_i915_private *dev_priv = to_i915(dev);
  6514. int pipe = crtc->pipe;
  6515. I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6516. I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
  6517. I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
  6518. I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
  6519. }
  6520. static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  6521. struct intel_link_m_n *m_n,
  6522. struct intel_link_m_n *m2_n2)
  6523. {
  6524. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  6525. int pipe = crtc->pipe;
  6526. enum transcoder transcoder = crtc->config->cpu_transcoder;
  6527. if (INTEL_GEN(dev_priv) >= 5) {
  6528. I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6529. I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
  6530. I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
  6531. I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
  6532. /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
  6533. * for gen < 8) and if DRRS is supported (to make sure the
  6534. * registers are not unnecessarily accessed).
  6535. */
  6536. if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
  6537. INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
  6538. I915_WRITE(PIPE_DATA_M2(transcoder),
  6539. TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
  6540. I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
  6541. I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
  6542. I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
  6543. }
  6544. } else {
  6545. I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  6546. I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
  6547. I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
  6548. I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
  6549. }
  6550. }
  6551. void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
  6552. {
  6553. struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
  6554. if (m_n == M1_N1) {
  6555. dp_m_n = &crtc->config->dp_m_n;
  6556. dp_m2_n2 = &crtc->config->dp_m2_n2;
  6557. } else if (m_n == M2_N2) {
  6558. /*
  6559. * M2_N2 registers are not supported. Hence m2_n2 divider value
  6560. * needs to be programmed into M1_N1.
  6561. */
  6562. dp_m_n = &crtc->config->dp_m2_n2;
  6563. } else {
  6564. DRM_ERROR("Unsupported divider value\n");
  6565. return;
  6566. }
  6567. if (crtc->config->has_pch_encoder)
  6568. intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
  6569. else
  6570. intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
  6571. }
  6572. static void vlv_compute_dpll(struct intel_crtc *crtc,
  6573. struct intel_crtc_state *pipe_config)
  6574. {
  6575. pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
  6576. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  6577. if (crtc->pipe != PIPE_A)
  6578. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  6579. /* DPLL not used with DSI, but still need the rest set up */
  6580. if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
  6581. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
  6582. DPLL_EXT_BUFFER_ENABLE_VLV;
  6583. pipe_config->dpll_hw_state.dpll_md =
  6584. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6585. }
  6586. static void chv_compute_dpll(struct intel_crtc *crtc,
  6587. struct intel_crtc_state *pipe_config)
  6588. {
  6589. pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
  6590. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  6591. if (crtc->pipe != PIPE_A)
  6592. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  6593. /* DPLL not used with DSI, but still need the rest set up */
  6594. if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
  6595. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
  6596. pipe_config->dpll_hw_state.dpll_md =
  6597. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  6598. }
  6599. static void vlv_prepare_pll(struct intel_crtc *crtc,
  6600. const struct intel_crtc_state *pipe_config)
  6601. {
  6602. struct drm_device *dev = crtc->base.dev;
  6603. struct drm_i915_private *dev_priv = to_i915(dev);
  6604. enum pipe pipe = crtc->pipe;
  6605. u32 mdiv;
  6606. u32 bestn, bestm1, bestm2, bestp1, bestp2;
  6607. u32 coreclk, reg_val;
  6608. /* Enable Refclk */
  6609. I915_WRITE(DPLL(pipe),
  6610. pipe_config->dpll_hw_state.dpll &
  6611. ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
  6612. /* No need to actually set up the DPLL with DSI */
  6613. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  6614. return;
  6615. mutex_lock(&dev_priv->sb_lock);
  6616. bestn = pipe_config->dpll.n;
  6617. bestm1 = pipe_config->dpll.m1;
  6618. bestm2 = pipe_config->dpll.m2;
  6619. bestp1 = pipe_config->dpll.p1;
  6620. bestp2 = pipe_config->dpll.p2;
  6621. /* See eDP HDMI DPIO driver vbios notes doc */
  6622. /* PLL B needs special handling */
  6623. if (pipe == PIPE_B)
  6624. vlv_pllb_recal_opamp(dev_priv, pipe);
  6625. /* Set up Tx target for periodic Rcomp update */
  6626. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
  6627. /* Disable target IRef on PLL */
  6628. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
  6629. reg_val &= 0x00ffffff;
  6630. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
  6631. /* Disable fast lock */
  6632. vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
  6633. /* Set idtafcrecal before PLL is enabled */
  6634. mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
  6635. mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
  6636. mdiv |= ((bestn << DPIO_N_SHIFT));
  6637. mdiv |= (1 << DPIO_K_SHIFT);
  6638. /*
  6639. * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
  6640. * but we don't support that).
  6641. * Note: don't use the DAC post divider as it seems unstable.
  6642. */
  6643. mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
  6644. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  6645. mdiv |= DPIO_ENABLE_CALIBRATION;
  6646. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  6647. /* Set HBR and RBR LPF coefficients */
  6648. if (pipe_config->port_clock == 162000 ||
  6649. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
  6650. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
  6651. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  6652. 0x009f0003);
  6653. else
  6654. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  6655. 0x00d0000f);
  6656. if (intel_crtc_has_dp_encoder(pipe_config)) {
  6657. /* Use SSC source */
  6658. if (pipe == PIPE_A)
  6659. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6660. 0x0df40000);
  6661. else
  6662. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6663. 0x0df70000);
  6664. } else { /* HDMI or VGA */
  6665. /* Use bend source */
  6666. if (pipe == PIPE_A)
  6667. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6668. 0x0df70000);
  6669. else
  6670. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  6671. 0x0df40000);
  6672. }
  6673. coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
  6674. coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
  6675. if (intel_crtc_has_dp_encoder(crtc->config))
  6676. coreclk |= 0x01000000;
  6677. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
  6678. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
  6679. mutex_unlock(&dev_priv->sb_lock);
  6680. }
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_crtc_state *pipe_config;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->base.crtc = &crtc->base;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}
/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where the PLL was
 * previously force-enabled with vlv_force_pll_on() and is no longer
 * needed.
 */
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (IS_CHERRYVIEW(dev_priv))
		chv_disable_pll(dev_priv, pipe);
	else
		vlv_disable_pll(dev_priv, pipe);
}
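
/*
 * A minimal usage sketch for the pair above (not taken from this file;
 * the divider values are purely illustrative): a caller that needs the
 * pipe A PLL running briefly, without enabling the pipe itself, could do
 * something like:
 *
 *	struct dpll dpll = { .m1 = 2, .m2 = 0x1a, .n = 1, .p1 = 2, .p2 = 10 };
 *
 *	if (vlv_force_pll_on(dev_priv, PIPE_A, &dpll) == 0) {
 *		... poke the hardware that needs a running PLL ...
 *		vlv_force_pll_off(dev_priv, PIPE_A);
 *	}
 */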
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
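
/*
 * Worked example of the p1 bitmask encoding above: p1 is programmed as a
 * one-hot field, so e.g. p1 == 3 becomes (1 << (3 - 1)) == 0x4 shifted
 * into the P1 post divider field, rather than the literal value 3.
 */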
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;
	/* We need to be careful not to change the adjusted mode, for
	 * otherwise the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
}
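
/*
 * Worked example of the timing register packing above, for a hypothetical
 * 1920x1080 mode with crtc_htotal == 2200: HTOTAL gets
 * (1920 - 1) | ((2200 - 1) << 16), i.e. active-1 in the low 16 bits and
 * total-1 in the high 16 bits. All the h/v timing registers follow the
 * same "value minus one" convention.
 */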
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}
}
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config)
{
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->base.adjusted_mode.flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
	mode->hsync = drm_mode_hsync(mode);
	mode->vrefresh = drm_mode_vrefresh(mode);
	drm_mode_set_name(mode);
}
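
/*
 * Sanity-check example for the conversion above: a 148500 kHz clock with
 * htotal 2200 and vtotal 1125 yields roughly
 * 148500000 / (2200 * 1125) = 60 Hz from drm_mode_vrefresh() (the DRM
 * helper also accounts for interlace/doublescan, not shown here).
 */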
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev_priv)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 48000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i8xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_pineview_lvds;
	} else {
		limit = &intel_limits_pineview_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
static int chv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_chv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	chv_compute_dpll(crtc, crtc_state);

	return 0;
}
static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_vlv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	vlv_compute_dpll(crtc, crtc_state);

	return 0;
}
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t tmp;

	if (INTEL_GEN(dev_priv) <= 3 &&
	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
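
/*
 * Note on the m2 reconstruction above: CHV splits m2 into an integer part
 * (PLL_DW0) and a 22-bit fraction (PLL_DW2), mirroring the split done in
 * chv_prepare_pll(). E.g. an integer part of 0x22 with fraction 0x200000
 * reads back as m2 = (0x22 << 22) | 0x200000, i.e. 34.5 in fixed point.
 */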
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
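
/*
 * Example of the dotclock fallback above: with a read-back port_clock of
 * 270000 kHz and pixel_multiplier 2, the default crtc_clock would be
 * 270000 / 2 = 135000 kHz, until an encoder's .get_config() fills in the
 * real value.
 */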
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	BUG_ON(val != final);
}
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 *  - Sequence to enable CLKOUT_DP
 *  - Sequence to enable CLKOUT_DP without spread
 *  - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t reg, tmp;

	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
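
/*
 * For reference, the three BSpec sequences listed above presumably map
 * onto the parameters as follows (a sketch, not verified against every
 * PCH variant):
 *
 *	lpt_enable_clkout_dp(dev, true, true);   CLKOUT_DP for FDI + PCH FDI I/O
 *	lpt_enable_clkout_dp(dev, true, false);  CLKOUT_DP (with spread)
 *	lpt_enable_clkout_dp(dev, false, false); CLKOUT_DP without spread
 */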
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
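
/*
 * Worked example for the bend math above: steps = -20 gives
 * BEND_IDX(-20) = (50 - 20) / 5 = 6, and a change in clock period of
 * -(-20 / 10) * 5.787 ps = +11.574 ps, i.e. a slightly slower clock,
 * matching the "< 0 slow down the clock" convention.
 */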
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	bool has_vga = false;

	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		default:
			break;
		}
	}

	if (has_vga) {
		lpt_bend_clkout_dp(to_i915(dev), 0);
		lpt_enable_clkout_dp(dev, true, true);
	} else {
		lpt_disable_clkout_dp(dev);
	}
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev);
}
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));
}
static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
		u32 val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
	}
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}
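
/*
 * Worked example of the computation above: a 148500 kHz dotclock at
 * 24 bpp over a 270000 kHz link gives bps = 148500 * 24 * 21 / 20 =
 * 3742200, and DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */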
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct dpll reduced_clock;
	bool has_reduced_clock = false;
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ? &reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read the M2_N2 registers only on gen < 8, where they
		 * exist, and only when DRRS is supported, so that the
		 * registers are not read unnecessarily.
		 */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
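
/*
 * Note on the TU decode above: the TU size field is stored as size - 1,
 * so e.g. a raw field value of 63 reads back as m_n->tu == 64, a common
 * DP transfer unit size.
 */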
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
  8123. static void skylake_get_pfit_config(struct intel_crtc *crtc,
  8124. struct intel_crtc_state *pipe_config)
  8125. {
  8126. struct drm_device *dev = crtc->base.dev;
  8127. struct drm_i915_private *dev_priv = to_i915(dev);
  8128. struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
  8129. uint32_t ps_ctrl = 0;
  8130. int id = -1;
  8131. int i;
  8132. /* find scaler attached to this pipe */
  8133. for (i = 0; i < crtc->num_scalers; i++) {
  8134. ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
  8135. if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
  8136. id = i;
  8137. pipe_config->pch_pfit.enabled = true;
  8138. pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
  8139. pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
  8140. break;
  8141. }
  8142. }
  8143. scaler_state->scaler_id = id;
  8144. if (id >= 0) {
  8145. scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
  8146. } else {
  8147. scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
  8148. }
  8149. }
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier,
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes
		 * which differentiate them), so just WARN about this case
		 * for now. */
		if (IS_GEN7(dev_priv)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (IS_HASWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

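/*
 * Note the asymmetry above: on Haswell the D_COMP value has to go
 * through the pcode mailbox (hence taking rps.hw_lock around
 * sandybridge_pcode_write()), while Broadwell exposes a directly
 * writable register. hsw_read_dcomp()/hsw_write_dcomp() hide that
 * difference from the LCPLL sequences below.
 */
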
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

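/*
 * In short, the disable sequence above is: optionally reparent the CD
 * clock to FCLK, disable the PLL and wait for it to report unlocked,
 * disable D_COMP compensation and wait for RCOMP to finish, and only
 * then (optionally) set the bit that allows the PLL to power down.
 * hsw_restore_lcpll() below undoes these steps in roughly the reverse
 * order.
 */
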
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not in PC8 state while touching these registers,
	 * otherwise we'll hang the machine. To keep us out of PC8, just
	 * grab a forcewake reference.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(dev_priv);
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

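/*
 * These two entry points are intended to be paired: the runtime PM
 * suspend path calls hsw_enable_pc8() once the display is otherwise
 * quiesced, and the resume path calls hsw_disable_pc8() before any
 * display state is touched again (see the callers in the runtime PM
 * code). Note that hsw_enable_pc8() only *allows* PC8+; whether the
 * package ever reaches C8 still depends on the rest of the system,
 * as described in the comment block above.
 */
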
static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned int req_cdclk = old_intel_state->dev_cdclk;

	bxt_set_cdclk(to_i915(dev), req_cdclk);
}

static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
					  int pixel_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
		pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

	/* BSpec says "Do not use DisplayPort with CDCLK less than
	 * 432 MHz, audio enabled, port width x4, and link rate
	 * HBR2 (5.4 GHz), or else there may be audio corruption or
	 * screen corruption."
	 */
	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    crtc_state->has_audio &&
	    crtc_state->port_clock >= 540000 &&
	    crtc_state->lane_count == 4)
		pixel_rate = max(432000, pixel_rate);

	return pixel_rate;
}

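/*
 * Worked example for the IPS adjustment above (illustrative numbers):
 * a 300000 kHz pipe pixel rate with IPS enabled on BDW is scaled to
 * DIV_ROUND_UP(300000 * 100, 95) = 315790 kHz, so that the cdclk
 * chosen from this value keeps the real pixel rate under 95% of
 * cdclk. The DP-audio clause then only raises the floor to 432000 kHz
 * when all four conditions (DP, audio, HBR2, x4) hold at once.
 */
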
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_crtc_state *crtc_state;
	unsigned max_pixel_rate = 0, i;
	enum pipe pipe;

	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, cstate, i) {
		int pixel_rate;

		crtc_state = to_intel_crtc_state(cstate);
		if (!crtc_state->base.enable) {
			intel_state->min_pixclk[i] = 0;
			continue;
		}

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
			pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
								    pixel_rate);

		intel_state->min_pixclk[i] = pixel_rate;
	}

	for_each_pipe(dev_priv, pipe)
		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);

	return max_pixel_rate;
}

static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val, data;
	int ret;

	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us(I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us((I915_READ(LCPLL_CTL) &
			 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev_priv);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}

static int broadwell_calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 540000)
		return 675000;
	else if (max_pixclk > 450000)
		return 540000;
	else if (max_pixclk > 337500)
		return 450000;
	else
		return 337500;
}

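/*
 * Example of the mapping above: a max pixel rate of 400000 kHz falls
 * in the (337500, 450000] bucket and selects a 450000 kHz cdclk, while
 * anything at or below 337500 kHz uses the minimum 337500 kHz. These
 * are exactly the four cdclk frequencies broadwell_set_cdclk()
 * accepts.
 */
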
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	int max_pixclk = ilk_max_pixel_rate(state);
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = broadwell_calc_cdclk(max_pixclk);

	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			      cdclk, dev_priv->max_cdclk_freq);
		return -EINVAL;
	}

	intel_state->cdclk = intel_state->dev_cdclk = cdclk;

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = broadwell_calc_cdclk(0);

	return 0;
}

static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	broadwell_set_cdclk(dev, req_cdclk);
}

static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	const int max_pixclk = ilk_max_pixel_rate(state);
	int vco = intel_state->cdclk_pll_vco;
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = skl_calc_cdclk(max_pixclk, vco);

	/*
	 * FIXME move the cdclk calculation to
	 * compute_config() so we can fail gracefully.
	 */
	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			  cdclk, dev_priv->max_cdclk_freq);
		cdclk = dev_priv->max_cdclk_freq;
	}

	intel_state->cdclk = intel_state->dev_cdclk = cdclk;

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = skl_calc_cdclk(0, vco);

	return 0;
}

static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_i915_private *dev_priv = to_i915(old_state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
	unsigned int req_cdclk = intel_state->dev_cdclk;
	unsigned int req_vco = intel_state->cdclk_pll_vco;

	skl_set_cdclk(dev_priv, req_cdclk, req_vco);
}

static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
		if (!intel_ddi_pll_select(crtc, crtc_state))
			return -EINVAL;
	}

	crtc->lowfreq_avail = false;

	return 0;
}

static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;

	switch (port) {
	case PORT_A:
		id = DPLL_ID_SKL_DPLL0;
		break;
	case PORT_B:
		id = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_C:
		id = DPLL_ID_SKL_DPLL2;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	id = temp >> (port * 3 + 1);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}

static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), and it is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}

static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	unsigned long power_domain_mask;
	bool active;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	if (IS_BROXTON(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	if (INTEL_GEN(dev_priv) >= 9) {
		skl_init_scalers(dev_priv, crtc, pipe_config);

		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev_priv))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}

static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}

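/*
 * Stride example for the 845/865 path above: a 100 pixel wide ARGB
 * cursor yields roundup_pow_of_two(100) * 4 = 128 * 4 = 512 bytes,
 * one of the four strides (256/512/1024/2048) the hardware accepts.
 * A width whose rounded-up stride falls outside that set trips the
 * WARN_ONCE and is forced back to a 256 byte stride.
 */
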
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->base.visible) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (plane_state->base.crtc_w) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(plane_state->base.crtc_w);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev_priv))
			cntl |= CURSOR_PIPE_CSC_ENABLE;

		if (plane_state->base.rotation & DRM_ROTATE_180)
			cntl |= CURSOR_ROTATE_180;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}

/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    plane_state->base.rotation & DRM_ROTATE_180) {
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	I915_WRITE(CURPOS(pipe), pos);

	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);
}

static bool cursor_size_ok(struct drm_i915_private *dev_priv,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev_priv) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev_priv))
				return false;
			/* fall through */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}

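/*
 * A few concrete cases of the check above: a 64x64 cursor passes on
 * every platform; 128x128 and 256x256 pass on gen3+ but are rejected
 * on gen2; a non-square size such as 64x128 gives width | height =
 * 192, which matches no case and is rejected. On 845g/865g only the
 * width is constrained (a multiple of 64, up to 64 or 512
 * respectively) and the height merely has to fit in the register
 * (up to 1023 lines).
 */
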
/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}

static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}

static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);

	return ALIGN(pitch, 64);
}

static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);

	return PAGE_ALIGN(pitch * mode->vdisplay);
}

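/*
 * Worked example for the two helpers above: the 640x480 load-detect
 * mode at 32bpp gives a pitch of ALIGN(DIV_ROUND_UP(640 * 32, 8), 64)
 * = ALIGN(2560, 64) = 2560 bytes, and a buffer size of
 * PAGE_ALIGN(2560 * 480) = 1228800 bytes (exactly 300 pages), which
 * is what intel_framebuffer_create_for_mode() below ends up
 * allocating.
 */
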
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_object_create(dev,
				     intel_framebuffer_size_for_mode(mode, bpp));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}

static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}

static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}

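/*
 * The << 16 shifts above are because atomic plane source coordinates
 * are in 16.16 fixed point: src_w = hdisplay << 16 means "scan out
 * exactly hdisplay source pixels", i.e. a 1:1 mapping between the
 * source rectangle and the hdisplay x vdisplay CRTC rectangle set
 * just before it.
 */
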
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We cannot rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_commit(state);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}

static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev_priv))
		return 120000;
	else if (!IS_GEN2(dev_priv))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				      "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data M/N values is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * For the link M/N values used here it is simpler:
	 * dotclock = (m * link_clock) / n
	 */
	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

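/*
 * Purely illustrative numbers for the formula above: with a link
 * frequency of 270000 kHz and link M/N values of 2000/8000, the
 * resulting dotclock is 2000 * 270000 / 8000 = 67500 kHz. The math is
 * done in 64 bits because link_m * link_freq can overflow 32 bits
 * long before either value individually looks suspicious.
 */
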
  9570. static void ironlake_pch_clock_get(struct intel_crtc *crtc,
  9571. struct intel_crtc_state *pipe_config)
  9572. {
  9573. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  9574. /* read out port_clock from the DPLL */
  9575. i9xx_crtc_clock_get(crtc, pipe_config);
  9576. /*
  9577. * In case there is an active pipe without active ports,
  9578. * we may need some idea for the dotclock anyway.
  9579. * Calculate one based on the FDI configuration.
  9580. */
  9581. pipe_config->base.adjusted_mode.crtc_clock =
  9582. intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
  9583. &pipe_config->fdi_m_n);
  9584. }
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}

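/*
 * Note on the "+ 1" decode above (background, with an illustrative
 * readout value): the timing registers hold each count minus one, the
 * active/start count in bits 15:0 and the total/end count in bits
 * 31:16. E.g. an HTOTAL readout of 0x0897077f would decode as
 * hdisplay = 0x77f + 1 = 1920 and htotal = 0x897 + 1 = 2200.
 */
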
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_flip_work *work;

	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->flip_work;
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		cancel_work_sync(&work->mmio_work);
		cancel_work_sync(&work->unpin_work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
	i915_gem_object_put(work->pending_flip_obj);
	mutex_unlock(&dev->struct_mutex);

	i915_gem_request_put(work->flip_queued_req);

	intel_frontbuffer_flip_complete(to_i915(dev),
					to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);

	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}

/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	return !((a - b) & 0x80000000);
}

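/*
 * Illustrative examples of the wrap-safe comparison above: the result
 * is true iff (a - b), taken modulo 2^32, is below 2^31. So
 * g4x_flip_count_after_eq(2, 0xfffffffe) is true (a is 4 counts past b
 * across the wraparound), while g4x_flip_count_after_eq(0, 1) is false,
 * since 0 - 1 = 0xffffffff has the sign bit set.
 */
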
static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (abort_flip_on_reset(crtc))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
					crtc->flip_work->flip_count);
}

static bool
__pageflip_finished_mmio(struct intel_crtc *crtc,
			 struct intel_flip_work *work)
{
	/*
	 * MMIO work completes when vblank is different from
	 * flip_queued_vblank.
	 *
	 * Reset counter value doesn't matter, this is handled by
	 * i915_wait_request finishing early, so no need to handle
	 * reset here.
	 */
	return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
}

static bool pageflip_finished(struct intel_crtc *crtc,
			      struct intel_flip_work *work)
{
	if (!atomic_read(&work->pending))
		return false;

	smp_rmb();

	if (is_mmio_work(work))
		return __pageflip_finished_mmio(crtc, work);
	else
		return __pageflip_finished_cs(crtc, work);
}

void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = crtc->flip_work;

	if (work != NULL &&
	    !is_mmio_work(work) &&
	    pageflip_finished(crtc, work))
		page_flip_completed(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = crtc->flip_work;

	if (work != NULL &&
	    is_mmio_work(work) &&
	    pageflip_finished(crtc, work))
		page_flip_completed(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ... */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}

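/*
 * Background note on the barrier above: smp_mb__before_atomic() orders
 * the write of flip_queued_vblank before the write of work->pending.
 * It pairs with the smp_rmb() in pageflip_finished(), which reads
 * work->pending first and the rest of the work item afterwards, so a
 * reader that observes pending == 1 also observes the vblank count
 * queued here.
 */
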
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_ring *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	return 0;
}

static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_ring *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	return 0;
}

static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
			intel_fb_modifier_to_tiling(fb->modifier));

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	return 0;
}

static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] |
			intel_fb_modifier_to_tiling(fb->modifier));
	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	return 0;
}

static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_ring *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (req->engine->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48-bit addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev_priv))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (req->engine->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev_priv))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(ring, DERRMR);
		intel_ring_emit(ring,
				i915_ggtt_offset(req->engine->scratch) + 256);
		if (IS_GEN8(dev_priv)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, fb->pitches[0] |
			intel_fb_modifier_to_tiling(fb->modifier));
	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	return 0;
}

static bool use_mmio_flip(struct intel_engine_cs *engine,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * MMIO flips are not used on older platforms, because the lack
	 * of a flip done interrupt there forces us to use CS flips.
	 * Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */
	if (engine == NULL)
		return true;

	if (INTEL_GEN(engine->i915) < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;

	return engine != i915_gem_object_last_write_engine(obj);
}

static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride = skl_plane_stride(fb, 0, rotation);

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier);
	}

	/*
	 * PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF writes; the whole update is therefore guaranteed
	 * to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

static void intel_mmio_flip_work_func(struct work_struct *w)
{
	struct intel_flip_work *work =
		container_of(w, struct intel_flip_work, mmio_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);

	intel_pipe_update_start(crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_do_mmio_flip(crtc, work->rotation, work);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc, work);
}

static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}

static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
				      struct intel_crtc *intel_crtc,
				      struct intel_flip_work *work)
{
	u32 addr, vblank;

	if (!atomic_read(&work->pending))
		return false;

	smp_rmb();

	vblank = intel_crtc_get_vblank_counter(intel_crtc);
	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req))
			return false;

		work->flip_ready_vblank = vblank;
	}

	if (vblank - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;

	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = crtc->flip_work;

	if (work != NULL && !is_mmio_work(work) &&
	    __pageflip_stall_check_cs(dev_priv, crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			  work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
		page_flip_completed(crtc);
		work = NULL;
	}

	if (work != NULL && !is_mmio_work(work) &&
	    intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);

	spin_unlock(&dev->event_lock);
}

static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_flip_work *work;
	struct intel_engine_cs *engine;
	bool mmio_flip;
	struct drm_i915_gem_request *request;
	struct i915_vma *vma;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe. In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these registers.
	 */
	if (INTEL_GEN(dev_priv) > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->flip_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);
			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->flip_work = work;
	spin_unlock_irq(&dev->event_lock);

	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = i915_gem_object_get(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
	if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
		ret = -EIO;
		goto unlock;
	}

	atomic_inc(&intel_crtc->unpin_work_count);

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		engine = dev_priv->engine[BCS];
		if (fb->modifier != old_fb->modifier)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			engine = NULL;
	} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		engine = dev_priv->engine[BCS];
	} else if (INTEL_GEN(dev_priv) >= 7) {
		engine = i915_gem_object_last_write_engine(obj);
		if (engine == NULL || engine->id != RCS)
			engine = dev_priv->engine[BCS];
	} else {
		engine = dev_priv->engine[RCS];
	}

	mmio_flip = use_mmio_flip(engine, obj);

	vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto cleanup_pending;
	}

	work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
	work->gtt_offset += intel_crtc->dspaddr_offset;
	work->rotation = crtc->primary->state->rotation;

	/*
	 * There's the potential that the next frame will not be compatible with
	 * FBC, so we want to call pre_update() before the actual page flip.
	 * The problem is that pre_update() caches some information about the fb
	 * object, so we want to do this only after the object is pinned. Let's
	 * be on the safe side and do this immediately before scheduling the
	 * flip.
	 */
	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
			     to_intel_plane_state(primary->state));

	if (mmio_flip) {
		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
		queue_work(system_unbound_wq, &work->mmio_work);
	} else {
		request = i915_gem_request_alloc(engine, engine->last_context);
		if (IS_ERR(request)) {
			ret = PTR_ERR(request);
			goto cleanup_unpin;
		}

		ret = i915_gem_request_await_object(request, obj, false);
		if (ret)
			goto cleanup_request;

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_request;

		intel_mark_page_flip_active(intel_crtc, work);

		work->flip_queued_req = i915_gem_request_get(request);
		i915_add_request_no_flush(request);
	}

	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_prepare(to_i915(dev),
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_request:
	i915_add_request_no_flush(request);
cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
unlock:
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	i915_gem_object_put(obj);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		drm_atomic_state_put(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}

/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
 * @state: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct intel_plane_state *new = to_intel_plane_state(state);
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);

	/* Update watermarks on tiling or size changes. */
	if (new->base.visible != cur->base.visible)
		return true;

	if (!cur->base.fb || !new->base.fb)
		return false;

	if (cur->base.fb->modifier != new->base.fb->modifier ||
	    cur->base.rotation != new->base.rotation ||
	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
		return true;

	return false;
}

static bool needs_scaling(struct intel_plane_state *state)
{
	int src_w = drm_rect_width(&state->base.src) >> 16;
	int src_h = drm_rect_height(&state->base.src) >> 16;
	int dst_w = drm_rect_width(&state->base.dst);
	int dst_h = drm_rect_height(&state->base.dst);

	return (src_w != dst_w || src_h != dst_h);
}

int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = to_intel_plane_state(plane_state)->base.visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled)
		to_intel_plane_state(plane_state)->base.visible = visible = false;

	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id,
			 intel_crtc->base.name,
			 plane->base.id, plane->name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, plane->name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(plane, plane_state)) {
		/* FIXME bollocks */
		pipe_config->update_wm_pre = true;
		pipe_config->update_wm_post = true;
	}

	/* Pre-gen9 platforms need two-step watermark updates */
	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
	    INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;

	if (visible || was_visible)
		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

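/*
 * Example of the two-way mask check above (hypothetical encoder types,
 * for illustration only): ->cloneable is a bitmask of encoder types an
 * encoder is willing to share a crtc with. If encoder a's mask contains
 * b's type but b's mask lacks a's type, the pair is rejected; cloning
 * requires each mask to contain the other encoder's type.
 */
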
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state. We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}

static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};

static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
}

static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	const struct drm_display_info *info = &connector->base.display_info;
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (info->bpc != 0 && info->bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, info->bpc * 3);
		pipe_config->pipe_bpp = info->bpc * 3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (info->bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}

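/*
 * Worked example of the clamping above (illustrative values): with a
 * pipe_bpp of 24 and an EDID that reports 6 bits per channel,
 * info->bpc * 3 = 18 < 24, so pipe_bpp is clamped to 18. A sink whose
 * EDID predates 1.4 reports bpc == 0 and is instead capped at 24
 * (8 bpc) by the second check.
 */
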
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}

static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
		      unsigned int lane_count, struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}

static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				      pipe_config->lane_count, &pipe_config->dp_m_n);

		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	if (IS_BROXTON(dev_priv)) {
		DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		DRM_DEBUG_KMS("dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev_priv)) {
		DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->pixel_format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}

static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* on DDI, fall through and treat it like DP/HDMI/eDP */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << enc_to_mst(&encoder->base)->primary->port;
			break;
		default:
			break;
		}
	}

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return true;
}

static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known not to cause problems are preserved. */

	tmp_state = crtc_state->base;
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;

	memset(crtc_state, 0, sizeof(*crtc_state));

	crtc_state->base = tmp_state;
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
}

static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive nor negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frames. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		pipe_config->output_types |= 1 << encoder->type;
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}

static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* Double check state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		/* Update hwmode for vblank functions */
		if (crtc->state->active)
			crtc->hwmode = crtc->state->adjusted_mode;
		else
			crtc->hwmode.crtc_clock = 0;

		/*
		 * Update legacy state to satisfy fbc code. This can
		 * be removed when fbc uses the atomic state.
		 */
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
			struct drm_plane_state *plane_state = crtc->primary->state;

			crtc->primary->fb = plane_state->fb;
			crtc->x = plane_state->src_x >> 16;
			crtc->y = plane_state->src_y >> 16;
		}
	}
}

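/*
 * Two clocks are considered "fuzzy equal" when their difference stays
 * below 5% of their sum:
 *
 *	(|c1 - c2| + c1 + c2) * 100 / (c1 + c2) < 105
 *
 * which soaks up rounding differences between a clock we computed and
 * the value read back from the hardware.
 */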
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105)
		return true;

	return false;
}

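/*
 * Compare two m/n link ratios. Unless an exact match is requested, the
 * pair with the smaller n is scaled up by powers of two until both n
 * values match; the m values are then compared fuzzily, so two different
 * encodings of (almost) the same ratio count as equal.
 */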
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}

static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       struct intel_link_m_n *m2_n2,
		       bool adjust)
{
	if (m_n->tu == m2_n2->tu &&
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
		if (adjust)
			*m2_n2 = *m_n;

		return true;
	}

	return false;
}

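/*
 * Compare the sw-computed pipe config against another one. With
 * adjust=true (the fastboot/fastset check) mismatches are only logged at
 * debug level and some fields (e.g. the m/n values above) are fixed up
 * in @pipe_config, while with adjust=false (post-commit state
 * verification) every mismatch is reported as an error.
 */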
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;

#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
	do { \
		if (!adjust) \
			DRM_ERROR(fmt, ##__VA_ARGS__); \
		else \
			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
	} while (0)

#define PIPE_CONF_CHECK_X(name) \
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_I(name) \
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_P(name) \
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %p, found %p)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_M_N(name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    adjust)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "or tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  current_config->alt_name.tu, \
			  current_config->alt_name.gmch_m, \
			  current_config->alt_name.gmch_n, \
			  current_config->alt_name.link_m, \
			  current_config->alt_name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_FLAGS(name, mask) \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_I(limited_color_range);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_I(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev_priv))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_M_N
#undef PIPE_CONF_CHECK_M_N_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
#undef INTEL_ERR_OR_DBG_KMS

	return ret;
}

static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
		     fdi_dotclock, dotclock);
	}
}

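/*
 * Cross-check the SKL+ watermark and DDB allocation hardware state
 * against what the atomic state believes was programmed: the per-level
 * plane watermarks, the transition watermarks and the DDB entries, for
 * both the universal planes and the cursor.
 */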
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
		sw_ddb_entry = &sw_ddb->plane[pipe][plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case, since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check.
	 */
	if (intel_crtc->cursor_addr) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}

static void
verify_connector_state(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	int i;

	for_each_connector_in_state(state, connector, old_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		if (state->crtc != crtc)
			continue;

		intel_connector_verify_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}

static void
verify_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}

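/*
 * Note: @old_crtc_state is destroyed below and its memory reused as
 * scratch space for the hardware state readout; only @new_crtc_state
 * still describes a valid software state when the comparison is made.
 */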
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* hw state is inconsistent with the pipe quirk */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active) {
			pipe_config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, pipe_config);
		}
	}

	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(crtc->state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}

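/*
 * Verify a single shared DPLL against its hardware state. When @crtc is
 * NULL only the global bookkeeping (active mask vs. reference mask) is
 * checked, which is what the disabled-DPLL verification below relies on.
 */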
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->config.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->config.crtc_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}

static void
verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
			 struct drm_crtc_state *old_crtc_state,
			 struct drm_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);

	if (new_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);

	if (old_state->shared_dpll &&
	    old_state->shared_dpll != new_state->shared_dpll) {
		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
		struct intel_shared_dpll *pll = old_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask)\n",
				pipe_name(drm_crtc_index(crtc)));
		I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
				pipe_name(drm_crtc_index(crtc)));
	}
}

static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
			  struct drm_atomic_state *state,
			  struct drm_crtc_state *old_state,
			  struct drm_crtc_state *new_state)
{
	if (!needs_modeset(new_state) &&
	    !to_intel_crtc_state(new_state)->update_pipe)
		return;

	verify_wm_state(crtc, new_state);
	verify_connector_state(crtc->dev, state, crtc);
	verify_crtc_state(crtc, old_state, new_state);
	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}

static void
verify_disabled_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
}

static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}

static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev_priv)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

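/*
 * Release the shared DPLL references of every crtc that undergoes a
 * full modeset; the clock computation step can then reassign DPLLs
 * without being constrained by the stale assignments.
 */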
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(crtc->state)->shared_dpll;

		if (!needs_modeset(crtc_state))
			continue;

		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

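/*
 * Add every currently active pipe to the state and force a modeset on
 * it; used when a global resource such as cdclk changes and all pipes
 * therefore have to be reprogrammed together.
 */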
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	/* add all active pipes to the state */
	for_each_crtc(state->dev, crtc) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->active || needs_modeset(crtc_state))
			continue;

		crtc_state->mode_changed = true;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			break;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			break;
	}

	return ret;
}

static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (crtc_state->active != crtc->state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;

		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;

		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
			      intel_state->cdclk, intel_state->dev_cdclk);
	} else {
		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}

/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase. The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
static int calc_watermark_data(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Is there platform-specific watermark information to calculate? */
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
			crtc_state->mode_changed = true;

		if (!needs_modeset(crtc_state))
			continue;

		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		if (i915.fastboot &&
		    intel_pipe_config_compare(dev_priv,
					to_intel_crtc_state(crtc->state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			to_intel_crtc_state(crtc_state)->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			return ret;
	} else {
		intel_state->cdclk = dev_priv->atomic_cdclk_freq;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, state);
	return calc_watermark_data(state);
}

static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

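/*
 * If the device has no usable hardware frame counter
 * (dev->max_vblank_count == 0, e.g. on gen2), fall back to the
 * timestamp-based count maintained by the vblank core.
 */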
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	if (!dev->max_vblank_count)
		return drm_accurate_vblank_count(&crtc->base);

	return dev->driver->get_vblank_counter(dev, crtc->pipe);
}

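/*
 * Wait for a vblank on every pipe in @crtc_mask: first grab a vblank
 * reference and snapshot the current count for each pipe, then wait
 * (up to 50ms per pipe) for the count to move past the snapshot.
 */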
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	for_each_pipe(dev_priv, pipe) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  pipe);

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(&crtc->base);
		if (WARN_ON(ret != 0)) {
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
	}

	for_each_pipe(dev_priv, pipe) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  pipe);
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(&crtc->base),
				msecs_to_jiffies(50));

		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));

		drm_crtc_vblank_put(&crtc->base);
	}
}

static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
{
	/* fb updated, need to unpin old fb */
	if (crtc_state->fb_changed)
		return true;

	/* wm changes, need vblank before final wm's */
	if (crtc_state->update_wm_post)
		return true;

	/*
	 * cxsr is re-enabled after vblank.
	 * This is already handled by crtc_state->update_wm_post,
	 * but added for clarity.
	 */
	if (crtc_state->disable_cxsr)
		return true;

	return false;
}

static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      unsigned int *crtc_vblank_mask)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
	bool modeset = needs_modeset(crtc->state);

	if (modeset) {
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(pipe_config, state);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
	}

	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
		intel_fbc_enable(
		    intel_crtc, pipe_config,
		    to_intel_plane_state(crtc->primary->state));
	}

	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

	if (needs_vblank_wait(pipe_config))
		*crtc_vblank_mask |= drm_crtc_mask(crtc);
}

static void intel_update_crtcs(struct drm_atomic_state *state,
			       unsigned int *crtc_vblank_mask)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!crtc->state->active)
			continue;

		intel_update_crtc(crtc, state, old_crtc_state,
				  crtc_vblank_mask);
	}
}

static void skl_update_crtcs(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;
	bool progress;
	enum pipe pipe;
	int i;

	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	for_each_crtc_in_state(state, crtc, old_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (crtc->state->active)
			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(crtc->state);
			pipe = intel_crtc->pipe;

			if (updated & cmask || !cstate->base.active)
				continue;
			if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
				continue;

			updated |= cmask;
			entries[i] = &cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !crtc->state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  crtc_vblank_mask);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);
}

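/*
 * The tail of the commit sequence: runs after the new state has been
 * swapped in, either synchronously (blocking commits) or from the
 * commit_work worker (nonblocking commits).
 */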
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	bool hw_check = intel_state->modeset;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;
	int i;

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(crtc->state) ||
		    to_intel_crtc_state(crtc->state)->update_pipe) {
			hw_check = true;

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(crtc->state));
		}

		if (!needs_modeset(crtc->state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active) {
				/*
				 * Make sure we don't call initial_watermarks
				 * for ILK-style watermark updates.
				 */
				if (dev_priv->display.atomic_update_watermarks)
					dev_priv->display.initial_watermarks(intel_state,
									     to_intel_crtc_state(crtc->state));
				else
					intel_update_watermarks(intel_crtc);
			}
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		if (dev_priv->display.modeset_commit_cdclk &&
		    (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
			dev_priv->display.modeset_commit_cdclk(state);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		bool modeset = needs_modeset(crtc->state);

		/* Complete events for the now-disabled pipes here. */
		if (modeset && !crtc->state->active && crtc->state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
			spin_unlock_irq(&dev->event_lock);

			crtc->state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state, &crtc_vblank_mask);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(crtc->state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      intel_cstate);
	}

	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
	}

	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
}

static void intel_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	intel_atomic_commit_tail(state);
}

static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		if (state->base.commit_work.func)
			queue_work(system_unbound_wq, &state->base.commit_work);
		break;

	case FENCE_FREE:
		drm_atomic_state_put(&state->base);
		break;
	}

	return NOTIFY_DONE;
}

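/*
 * Hand the frontbuffer tracking bits over from each plane's old
 * framebuffer object to its new one, matching the state swap that has
 * just taken place.
 */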
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_plane_in_state(state, plane, old_plane_state, i)
		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
				  intel_fb_obj(plane->state->fb),
				  to_intel_plane(plane)->frontbuffer_bit);
}

/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	drm_atomic_helper_swap_state(state, true);
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_commit(state);
	intel_atomic_track_fbs(state);

	if (intel_state->modeset) {
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
	}

	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work,
		  nonblock ? intel_atomic_commit_work : NULL);

	i915_sw_fence_commit(&intel_state->commit_ready);
	if (!nonblock) {
		i915_sw_fence_wait(&intel_state->commit_ready);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory\n",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

out:
	drm_atomic_state_put(state);
}

/*
 * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
 * drm_atomic_helper_legacy_gamma_set() directly.
 */
static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
					 u16 *red, u16 *green, u16 *blue,
					 uint32_t size)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_crtc_state *state;
	int ret;

	ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
	if (ret)
		return ret;

	/*
	 * Make sure we update the legacy properties so this works when
	 * atomic is not enabled.
	 */
	state = crtc->state;
	drm_object_property_set_value(&crtc->base,
				      config->degamma_lut_property,
				      (state->degamma_lut) ?
				      state->degamma_lut->base.id : 0);
	drm_object_property_set_value(&crtc->base,
				      config->ctm_property,
				      (state->ctm) ?
				      state->ctm->base.id : 0);
	drm_object_property_set_value(&crtc->base,
				      config->gamma_lut_property,
				      (state->gamma_lut) ?
				      state->gamma_lut->base.id : 0);

	return 0;
}

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_atomic_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state,
							   plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	if (!new_state->fence) { /* implicit fencing */
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
	}

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev_priv)->cursor_needs_physical) {
		int align = IS_I830(dev_priv) ? 16 * 1024 : 256;

		ret = i915_gem_object_attach_phys(obj, align);
		if (ret) {
			DRM_DEBUG_KMS("failed to attach phys object\n");
			return ret;
		}
	} else {
		struct i915_vma *vma;

		vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
		if (IS_ERR(vma)) {
			DRM_DEBUG_KMS("failed to pin object\n");
			return PTR_ERR(vma);
		}
	}

	return 0;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct intel_plane_state *old_intel_state;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev_priv)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
}
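
/*
 * Compute the maximum downscale factor for the SKL+ pipe scaler, in
 * 16.16 fixed point: capped just below 3x (hence the "-1"), and further
 * limited by the cdclk/crtc_clock ratio, since the scaler can only
 * downscale by as much as cdclk exceeds the pipe pixel clock.
 */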
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 *    close to 3 but not 3, -1 is for that purpose
	 *            or
	 *    cdclk/crtc_clock
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}
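
/*
 * Validate a requested primary plane update: only gen9+ may scale or
 * freely position the primary plane, and then only when no colorkey is
 * in use; gen9 surface offsets get an extra check below.
 */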
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_crtc *crtc = state->base.crtc;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   min_scale, max_scale,
					   can_position, true);
	if (ret)
		return ret;

	if (!state->base.fb)
		return 0;

	if (INTEL_GEN(dev_priv) >= 9) {
		ret = skl_check_plane_surface(state);
		if (ret)
			return ret;
	}

	return 0;
}
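
/*
 * Start of a per-CRTC commit: everything between here and
 * intel_finish_crtc_commit() runs inside the vblank evasion critical
 * section, so the pipe updates below land in the same frame.
 */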
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *intel_cstate =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	if (modeset)
		goto out;

	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}

static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc, NULL);
}
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};
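
/*
 * Create the primary plane for a pipe, selecting the supported format
 * list, the update/disable hooks and the rotation property to match
 * the platform generation.
 */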
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->plane = (enum plane) !pipe;
	else
		primary->plane = (enum plane) pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_90 |
			DRM_ROTATE_180 | DRM_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_180 |
			DRM_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_180;
	} else {
		supported_rotations = DRM_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
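
/*
 * Validate a requested cursor update: cursors can't be scaled or
 * tiled, only certain fixed sizes are supported, and the backing
 * object must be large enough for the implied stride.
 */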
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   DRM_PLANE_HELPER_NO_SCALING,
					   DRM_PLANE_HELPER_NO_SCALING,
					   true, true);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
			    state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
	    state->base.visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}
static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->cursor_addr = 0;
	intel_crtc_update_cursor(crtc, NULL);
}

static void
intel_update_cursor_plane(struct drm_plane *plane,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
		addr = i915_gem_object_ggtt_offset(obj, NULL);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc_update_cursor(crtc, state);
}
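
/* Create and register the cursor plane for a pipe. */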
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_ROTATE_0,
						   DRM_ROTATE_0 |
						   DRM_ROTATE_180);

	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}
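
/* Mark all shared pipe scalers as unused and reset them to dynamic mode. */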
static void skl_init_scalers(struct drm_i915_private *dev_priv,
			     struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	for (i = 0; i < crtc->num_scalers; i++) {
		struct intel_scaler *scaler = &scaler_state->scalers[i];

		scaler->in_use = 0;
		scaler->mode = PS_SCALER_MODE_DYN;
	}

	scaler_state->scaler_id = -1;
}
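
/*
 * Allocate and initialise a CRTC for the given pipe, along with its
 * primary, sprite and cursor planes, and hook it into the DRM core.
 */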
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev_priv, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;
	intel_crtc->plane = primary->plane;

	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}
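
/*
 * Build the possible_clones bitmask for an encoder: bit N is set when
 * the N:th encoder on the device may be enabled simultaneously with
 * this one.
 */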
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}
static bool has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (IS_CHERRYVIEW(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = I915_READ(PP_CONTROL(pps_idx));

		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		I915_WRITE(PP_CONTROL(pps_idx), val);
	}
}

static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
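
/*
 * Probe the platform's display outputs using straps, fuses and the
 * VBT, and register an encoder for every port that appears present.
 */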
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev);

	if (intel_crt_present(dev_priv))
		intel_crt_init(dev);

	if (IS_BROXTON(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);

		intel_dsi_init(dev);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);

		if (has_edp_a(dev_priv))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X, SDVOC doesn't have its own detect register */
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev_priv))
		intel_tv_init(dev);

	intel_psr_init(dev);

	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
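
/* Tear down a userspace-created framebuffer and drop its GEM object reference. */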
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	i915_gem_object_put(intel_fb->obj);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	if (obj->pin_display && obj->cache_dirty)
		i915_gem_clflush_object(obj, true);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
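
/*
 * Maximum scanout pitch in bytes for the given generation and tiling
 * mode; enforced against userspace framebuffers in
 * intel_framebuffer_init().
 */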
static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
			 uint64_t fb_modifier, uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev_priv)->gen;

	if (gen >= 9) {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		/* "The stride in bytes must not exceed the size of 8K
		 *  pixels and 32K bytes."
		 */
		return min(8192 * cpp, 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
		   !IS_CHERRYVIEW(dev_priv)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}
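
/*
 * Validate a userspace framebuffer (modifier, stride alignment, pitch
 * limit, pixel format, offsets) against hardware limits, then wrap it
 * in a drm_framebuffer and take a reference on the backing object.
 */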
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int tiling = i915_gem_object_get_tiling(obj);
	int ret;
	u32 pitch_limit, stride_alignment;
	struct drm_format_name_buf format_name;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Sanity check the passed-in modifier. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_INFO(dev_priv)->gen < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev_priv,
						     mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE &&
	    mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0],
			  i915_gem_object_get_stride(obj));
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_GEN(dev_priv) > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_GEN(dev_priv) < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_GEN(dev_priv) < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
	if (ret)
		return ret;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	intel_fb->obj->framebuffer_references++;

	return 0;
}
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev_priv))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (IS_BROADWELL(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			broadwell_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broadwell_modeset_calc_cdclk;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			bxt_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			bxt_modeset_calc_cdclk;
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			skl_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			skl_modeset_calc_cdclk;
	}

	if (dev_priv->info.gen >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
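
/* Apply any PCI- or DMI-matched quirks for this device. */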
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);

	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev_priv);
}
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks to
 * ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WMs should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state;
        struct intel_atomic_state *intel_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *cstate;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
        int i;

        /* Only supported on platforms that use atomic watermark design */
        if (!dev_priv->display.optimize_watermarks)
                return;

        /*
         * We need to hold connection_mutex before calling duplicate_state so
         * that the connector loop is protected.
         */
        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock_all_ctx(dev, &ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto retry;
        } else if (WARN_ON(ret)) {
                goto fail;
        }

        state = drm_atomic_helper_duplicate_state(dev, &ctx);
        if (WARN_ON(IS_ERR(state)))
                goto fail;

        intel_state = to_intel_atomic_state(state);

        /*
         * Hardware readout is the only time we don't want to calculate
         * intermediate watermarks (since we don't trust the current
         * watermarks).
         */
        intel_state->skip_intermediate_wm = true;

        ret = intel_atomic_check(dev, state);
        if (ret) {
                /*
                 * If we fail here, it means that the hardware appears to be
                 * programmed in a way that shouldn't be possible, given our
                 * understanding of watermark requirements. This might mean a
                 * mistake in the hardware readout code or a mistake in the
                 * watermark calculations for a given platform. Raise a WARN
                 * so that this is noticeable.
                 *
                 * If this actually happens, we'll have to just leave the
                 * BIOS-programmed watermarks untouched and hope for the best.
                 */
                WARN(true, "Could not determine valid watermarks for inherited state\n");
                goto put_state;
        }

        /* Write calculated watermark values back */
        for_each_crtc_in_state(state, crtc, cstate, i) {
                struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

                cs->wm.need_postvbl_update = true;
                dev_priv->display.optimize_watermarks(intel_state, cs);
        }

put_state:
        drm_atomic_state_put(state);
fail:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}
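/*
 * A note on the locking dance above: drm_modeset_lock_all_ctx() can fail
 * with -EDEADLK when its lock acquisition order conflicts with another
 * thread, in which case drm_modeset_backoff() drops everything and the
 * caller retries. A minimal sketch of the same pattern (hypothetical
 * caller, assuming only a struct drm_device *dev in scope):
 *
 *      struct drm_modeset_acquire_ctx ctx;
 *      int ret;
 *
 *      drm_modeset_acquire_init(&ctx, 0);
 *      do {
 *              ret = drm_modeset_lock_all_ctx(dev, &ctx);
 *              if (ret == -EDEADLK)
 *                      drm_modeset_backoff(&ctx);
 *      } while (ret == -EDEADLK);
 *      ... do work under the locks ...
 *      drm_modeset_drop_locks(&ctx);
 *      drm_modeset_acquire_fini(&ctx);
 *
 * intel_display_resume() further down uses the while-loop form of this
 * same idiom.
 */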
int intel_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        enum pipe pipe;
        struct intel_crtc *crtc;

        drm_mode_config_init(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;

        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;

        dev->mode_config.allow_fb_modifiers = true;

        dev->mode_config.funcs = &intel_mode_funcs;

        intel_init_quirks(dev);

        intel_init_pm(dev_priv);

        if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return 0;

        /*
         * There may be no VBT; and if the BIOS enabled SSC we can
         * just keep using it to avoid unnecessary flicker. Whereas if the
         * BIOS isn't using it, don't assume it will work even if the VBT
         * indicates as much.
         */
        if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
                bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
                                            DREF_SSC1_ENABLE);

                if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
                        DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
                                      bios_lvds_use_ssc ? "en" : "dis",
                                      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
                        dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
                }
        }

        if (IS_GEN2(dev_priv)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        } else if (IS_GEN3(dev_priv)) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
        } else {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
        }

        if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
                dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
                dev->mode_config.cursor_height = 1023;
        } else if (IS_GEN2(dev_priv)) {
                dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
                dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
        } else {
                dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
        }

        dev->mode_config.fb_base = ggtt->mappable_base;

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev_priv)->num_pipes,
                      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

        for_each_pipe(dev_priv, pipe) {
                int ret;

                ret = intel_crtc_init(dev_priv, pipe);
                if (ret) {
                        drm_mode_config_cleanup(dev);
                        return ret;
                }
        }

        intel_update_czclk(dev_priv);
        intel_update_cdclk(dev_priv);
        dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

        intel_shared_dpll_init(dev);

        if (dev_priv->max_cdclk_freq == 0)
                intel_update_max_cdclk(dev_priv);

        /* Just disable it once at startup */
        i915_disable_vga(dev_priv);
        intel_setup_outputs(dev);

        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top. This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                dev_priv->display.get_initial_plane_config(crtc,
                                                           &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        sanitize_watermarks(dev);

        return 0;
}
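/*
 * intel_enable_pipe_a() below backs the QUIRK_PIPEA_FORCE handling in
 * intel_sanitize_crtc(): on affected platforms pipe A must be kept running,
 * so if the BIOS left it off we bring it up via the load-detect path.
 */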
static void intel_enable_pipe_a(struct drm_device *dev)
{
        struct intel_connector *connector;
        struct drm_connector *crt = NULL;
        struct intel_load_detect_pipe load_detect_temp;
        struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

        /* We can't just switch on pipe A, we need to set things up with a
         * proper mode and output configuration. As a gross hack, enable pipe A
         * by enabling the load detect pipe once. */
        for_each_intel_connector(dev, connector) {
                if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
                        crt = &connector->base;
                        break;
                }
        }

        if (!crt)
                return;

        if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
                intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 val;

        if (INTEL_INFO(dev_priv)->num_pipes == 1)
                return true;

        val = I915_READ(DSPCNTR(!crtc->plane));

        if ((val & DISPLAY_PLANE_ENABLE) &&
            (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
                return false;

        return true;
}
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;

        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                return true;

        return false;
}

static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct intel_connector *connector;

        for_each_connector_on_encoder(dev, &encoder->base, connector)
                return connector;

        return NULL;
}
static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
                               enum transcoder pch_transcoder)
{
        return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
                (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

        /* Clear any frame start delays used for debugging left by the BIOS */
        if (!transcoder_is_dsi(cpu_transcoder)) {
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                I915_WRITE(reg,
                           I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
        }

        /* restore vblank interrupts to correct state */
        drm_crtc_vblank_reset(&crtc->base);
        if (crtc->active) {
                struct intel_plane *plane;

                drm_crtc_vblank_on(&crtc->base);

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
                                continue;

                        plane->disable_plane(&plane->base, &crtc->base);
                }
        }

        /* We need to sanitize the plane -> pipe mapping first because this will
         * disable the crtc (and hence change the state) if it is wrong. Note
         * that gen4+ has a fixed plane -> pipe mapping. */
        if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
                bool plane;

                DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
                              crtc->base.base.id, crtc->base.name);

                /* Pipe has the wrong plane attached and the plane is active.
                 * Temporarily change the plane mapping and disable everything
                 * ... */
                plane = crtc->plane;
                to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
                crtc->plane = !plane;
                intel_crtc_disable_noatomic(&crtc->base);
                crtc->plane = plane;
        }

        if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
            crtc->pipe == PIPE_A && !crtc->active) {
                /* The BIOS forgot to enable pipe A; this mostly happens after
                 * resume. Force-enable the pipe to fix this; the update_dpms
                 * call below restores the pipe to the right state, but leaves
                 * the required bits on. */
                intel_enable_pipe_a(dev);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc->active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base);

        if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we don't have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                /*
                 * We track the PCH transcoder underrun reporting state
                 * within the crtc, with the crtc for pipe A housing the
                 * underrun reporting state for PCH transcoder A, the crtc
                 * for pipe B housing it for PCH transcoder B, etc. LPT-H
                 * has only PCH transcoder A, and marking underrun reporting
                 * as disabled for the non-existing PCH transcoders B and C
                 * would prevent enabling the south error interrupt (see
                 * cpt_can_enable_serr_int()).
                 */
                if (has_pch_transcoder(dev_priv, (enum transcoder)crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
}
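/*
 * Both sanitize helpers are invoked from intel_modeset_setup_hw_state()
 * once readout is done: encoders are sanitized first, then each crtc, so
 * the crtc fixup can rely on the encoder/crtc links already being
 * consistent (see the "Enabled encoders without active connectors" note
 * at the end of intel_sanitize_encoder() below).
 */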
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
        struct intel_connector *connector;

        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
        bool has_active_crtc = encoder->base.crtc &&
                to_intel_crtc(encoder->base.crtc)->active;

        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                              encoder->base.base.id,
                              encoder->base.name);

                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (encoder->base.crtc) {
                        struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);
                        encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                }
                encoder->base.crtc = NULL;

                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */
                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }
        /* Enabled encoders without active connectors will be fixed in
         * the crtc fixup. */
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

        if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
                i915_disable_vga(dev_priv);
        }
}

void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
        /* This function can be called either from intel_modeset_setup_hw_state
         * or at a very early point in our resume sequence, where the power well
         * structures are not yet restored. Since this function is at a very
         * paranoid "someone might have enabled VGA while we were not looking"
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
        if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
                return;

        i915_redisable_vga_power_on(dev_priv);

        intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
static bool primary_get_hw_state(struct intel_plane *plane)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

        return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
        struct drm_plane *primary = crtc->base.primary;
        struct intel_plane_state *plane_state =
                to_intel_plane_state(primary->state);

        plane_state->base.visible = crtc->active &&
                primary_get_hw_state(to_intel_plane(primary));

        if (plane_state->base.visible)
                crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        int i;

        dev_priv->active_crtcs = 0;

        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state = crtc->config;

                __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
                memset(crtc_state, 0, sizeof(*crtc_state));
                crtc_state->base.crtc = &crtc->base;

                crtc_state->base.active = crtc_state->base.enable =
                        dev_priv->display.get_pipe_config(crtc, crtc_state);

                crtc->base.enabled = crtc_state->base.enable;
                crtc->active = crtc_state->base.active;

                if (crtc_state->base.active)
                        dev_priv->active_crtcs |= 1 << crtc->pipe;

                readout_plane_state(crtc);

                DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
                              crtc->base.base.id, crtc->base.name,
                              enableddisabled(crtc->active));
        }

        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                pll->on = pll->funcs.get_hw_state(dev_priv, pll,
                                                  &pll->config.hw_state);
                pll->config.crtc_mask = 0;
                for_each_intel_crtc(dev, crtc) {
                        if (crtc->active && crtc->config->shared_dpll == pll)
                                pll->config.crtc_mask |= 1 << crtc->pipe;
                }
                pll->active_mask = pll->config.crtc_mask;

                DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
                              pll->name, pll->config.crtc_mask, pll->on);
        }

        for_each_intel_encoder(dev, encoder) {
                pipe = 0;

                if (encoder->get_hw_state(encoder, &pipe)) {
                        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

                        encoder->base.crtc = &crtc->base;
                        crtc->config->output_types |= 1 << encoder->type;
                        encoder->get_config(encoder, crtc->config);
                } else {
                        encoder->base.crtc = NULL;
                }

                DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                              encoder->base.base.id, encoder->base.name,
                              enableddisabled(encoder->base.crtc),
                              pipe_name(pipe));
        }

        for_each_intel_connector(dev, connector) {
                if (connector->get_hw_state(connector)) {
                        connector->base.dpms = DRM_MODE_DPMS_ON;

                        encoder = connector->encoder;
                        connector->base.encoder = &encoder->base;

                        if (encoder->base.crtc &&
                            encoder->base.crtc->state->active) {
                                /*
                                 * This has to be done during hardware readout
                                 * because anything calling .crtc_disable may
                                 * rely on the connector_mask being accurate.
                                 */
                                encoder->base.crtc->state->connector_mask |=
                                        1 << drm_connector_index(&connector->base);
                                encoder->base.crtc->state->encoder_mask |=
                                        1 << drm_encoder_index(&encoder->base);
                        }
                } else {
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
                              connector->base.base.id, connector->base.name,
                              enableddisabled(connector->base.encoder));
        }

        for_each_intel_crtc(dev, crtc) {
                int pixclk = 0;

                crtc->base.hwmode = crtc->config->base.adjusted_mode;

                memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
                if (crtc->base.state->active) {
                        intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
                        intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
                        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

                        /*
                         * The initial mode needs to be set in order to keep
                         * the atomic core happy. It wants a valid mode if the
                         * crtc's enabled, so we do the above call.
                         *
                         * At this point some state updated by the connectors
                         * in their ->detect() callback has not run yet, so
                         * no recalculation can be done yet.
                         *
                         * Even if we could do a recalculation and modeset
                         * right now it would cause a double modeset if
                         * fbdev or userspace chooses a different initial mode.
                         *
                         * If that happens, someone indicated they wanted a
                         * mode change, which means it's safe to do a full
                         * recalculation.
                         */
                        crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

                        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                                pixclk = ilk_pipe_pixel_rate(crtc->config);
                        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                pixclk = crtc->config->base.adjusted_mode.crtc_clock;
                        else
                                WARN_ON(dev_priv->display.modeset_calc_cdclk);

                        /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
                        if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
                                pixclk = DIV_ROUND_UP(pixclk * 100, 95);

                        drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
                        update_scanline_offset(crtc);
                }

                dev_priv->min_pixclk[crtc->pipe] = pixclk;

                intel_pipe_config_sanity_check(dev_priv, crtc->config);
        }
}
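/*
 * Readout above proceeds in dependency order: crtcs first (pipe config and
 * primary plane), then shared DPLLs, then encoders (which link back to a
 * crtc), then connectors (which link back to an encoder), and finally the
 * per-crtc mode and pixel-clock bookkeeping derived from all of the above.
 */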
/*
 * Scan out the current hw modeset state and sanitize it to match the
 * current software state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        int i;

        intel_modeset_readout_hw_state(dev);

        /* HW state is read out, now we need to sanitize this mess. */
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
        }

        for_each_pipe(dev_priv, pipe) {
                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

                intel_sanitize_crtc(crtc);
                intel_dump_pipe_config(crtc, crtc->config,
                                       "[setup_hw_state]");
        }

        intel_modeset_update_connector_atomic_state(dev);

        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                if (!pll->on || pll->active_mask)
                        continue;

                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

                pll->funcs.disable(dev_priv, pll);
                pll->on = false;
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                vlv_wm_get_hw_state(dev);
        else if (IS_GEN9(dev_priv))
                skl_wm_get_hw_state(dev);
        else if (HAS_PCH_SPLIT(dev_priv))
                ilk_wm_get_hw_state(dev);

        for_each_intel_crtc(dev, crtc) {
                unsigned long put_domains;

                put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }
        intel_display_set_init_power(dev_priv, false);

        intel_fbc_init_pipe_state(dev_priv);
}
void intel_display_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = dev_priv->modeset_restore_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        dev_priv->modeset_restore_state = NULL;
        if (state)
                state->acquire_ctx = &ctx;

        /*
         * This is a kludge because with real atomic modeset mode_config.mutex
         * won't be taken. Unfortunately some probed state like
         * audio_codec_enable is still protected by mode_config.mutex, so lock
         * it here for now.
         */
        mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, 0);

        while (1) {
                ret = drm_modeset_lock_all_ctx(dev, &ctx);
                if (ret != -EDEADLK)
                        break;

                drm_modeset_backoff(&ctx);
        }

        if (!ret)
                ret = __intel_display_resume(dev, state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        mutex_unlock(&dev->mode_config.mutex);

        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
        if (state) /* there may be no saved state to restore */
                drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;

        intel_init_gt_powersave(dev_priv);

        intel_modeset_init_hw(dev);

        intel_setup_overlay(dev_priv);

        /*
         * Make sure any fbs we allocated at startup are properly
         * pinned & fenced. When we do the allocation it's too early
         * for this.
         */
        for_each_crtc(dev, c) {
                struct i915_vma *vma;

                obj = intel_fb_obj(c->primary->fb);
                if (obj == NULL)
                        continue;

                mutex_lock(&dev->struct_mutex);
                vma = intel_pin_and_fence_fb_obj(c->primary->fb,
                                                 c->primary->state->rotation);
                mutex_unlock(&dev->struct_mutex);
                if (IS_ERR(vma)) {
                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                  to_intel_crtc(c)->pipe);
                        drm_framebuffer_unreference(c->primary->fb);
                        c->primary->fb = NULL;
                        c->primary->crtc = c->primary->state->crtc = NULL;
                        update_state_fb(c->primary);
                        c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
                }
        }
}
int intel_connector_register(struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);
        int ret;

        ret = intel_backlight_device_register(intel_connector);
        if (ret)
                goto err;

        return 0;

err:
        return ret;
}

void intel_connector_unregister(struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);

        intel_backlight_device_unregister(intel_connector);
        intel_panel_destroy_backlight(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        intel_disable_gt_powersave(dev_priv);

        /*
         * Uninstall interrupts and stop polling as the first thing to avoid
         * creating havoc. Too much stuff here (turning off connectors, ...)
         * would experience fancy races otherwise.
         */
        intel_irq_uninstall(dev_priv);

        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
         * poll handlers. Hence disable polling after hpd handling is shut down.
         */
        drm_kms_helper_poll_fini(dev);

        intel_unregister_dsm_handler();

        intel_fbc_global_disable(dev_priv);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        drm_mode_config_cleanup(dev);

        intel_cleanup_overlay(dev_priv);

        intel_cleanup_gt_powersave(dev_priv);

        intel_teardown_gmbus(dev);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder)
{
        connector->encoder = encoder;
        drm_mode_connector_attach_encoder(&connector->base,
                                          &encoder->base);
}

/*
 * Set the VGA decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
        unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
        u16 gmch_ctrl;

        if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
                DRM_ERROR("failed to read control word\n");
                return -EIO;
        }

        if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
                return 0;

        if (state)
                gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
        else
                gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

        if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
                DRM_ERROR("failed to write control word\n");
                return -EIO;
        }

        return 0;
}
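/*
 * Usage note (sketch): intel_modeset_vga_set_state(dev_priv, false) sets
 * INTEL_GMCH_VGA_DISABLE in the bridge's GMCH control word so legacy VGA
 * cycles are no longer decoded; passing true clears the bit again. The
 * early bail-out above makes repeated calls with the same state a no-op.
 * Typically this would be wired into the VGA arbiter's set-decode callback
 * (an assumption about the caller; the registration is not in this file).
 */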
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

        u32 power_well_driver;

        int num_transcoders;

        struct intel_cursor_error_state {
                u32 control;
                u32 position;
                u32 base;
                u32 size;
        } cursor[I915_MAX_PIPES];

        struct intel_pipe_error_state {
                bool power_domain_on;
                u32 source;
                u32 stat;
        } pipe[I915_MAX_PIPES];

        struct intel_plane_error_state {
                u32 control;
                u32 stride;
                u32 size;
                u32 pos;
                u32 addr;
                u32 surface;
                u32 tile_offset;
        } plane[I915_MAX_PIPES];

        struct intel_transcoder_error_state {
                bool power_domain_on;
                enum transcoder cpu_transcoder;

                u32 conf;

                u32 htotal;
                u32 hblank;
                u32 hsync;
                u32 vtotal;
                u32 vblank;
                u32 vsync;
        } transcoder[4];
};
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
        struct intel_display_error_state *error;
        int transcoders[] = {
                TRANSCODER_A,
                TRANSCODER_B,
                TRANSCODER_C,
                TRANSCODER_EDP,
        };
        int i;

        if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return NULL;

        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;

        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

        for_each_pipe(dev_priv, i) {
                error->pipe[i].power_domain_on =
                        __intel_display_power_is_enabled(dev_priv,
                                                         POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;

                error->cursor[i].control = I915_READ(CURCNTR(i));
                error->cursor[i].position = I915_READ(CURPOS(i));
                error->cursor[i].base = I915_READ(CURBASE(i));

                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
                if (INTEL_GEN(dev_priv) <= 3) {
                        error->plane[i].size = I915_READ(DSPSIZE(i));
                        error->plane[i].pos = I915_READ(DSPPOS(i));
                }
                if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        error->plane[i].addr = I915_READ(DSPADDR(i));
                if (INTEL_GEN(dev_priv) >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }

                error->pipe[i].source = I915_READ(PIPESRC(i));

                if (HAS_GMCH_DISPLAY(dev_priv))
                        error->pipe[i].stat = I915_READ(PIPESTAT(i));
        }

        /* Note: this does not include DSI transcoders. */
        error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
        if (HAS_DDI(dev_priv))
                error->num_transcoders++; /* Account for eDP. */

        for (i = 0; i < error->num_transcoders; i++) {
                enum transcoder cpu_transcoder = transcoders[i];

                error->transcoder[i].power_domain_on =
                        __intel_display_power_is_enabled(dev_priv,
                                                         POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;

                error->transcoder[i].cpu_transcoder = cpu_transcoder;

                error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
                error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
                error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
                error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
                error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
                error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
        }

        return error;
}
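/*
 * The snapshot built by intel_display_capture_error_state() is consumed by
 * intel_display_print_error_state() below; the two must agree on which
 * fields are valid. Registers behind a powered-down domain are skipped at
 * capture time and, since the state is kzalloc'd, simply print as zero.
 */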
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                                struct drm_i915_private *dev_priv,
                                struct intel_display_error_state *error)
{
        int i;

        if (!error)
                return;

        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(dev_priv, i) {
                err_printf(m, "Pipe [%d]:\n", i);
                err_printf(m, "  Power: %s\n",
                           onoff(error->pipe[i].power_domain_on));
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
                err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

                err_printf(m, "Plane [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
                err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
                if (INTEL_GEN(dev_priv) <= 3) {
                        err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
                        err_printf(m, "  POS: %08x\n", error->plane[i].pos);
                }
                if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
                if (INTEL_GEN(dev_priv) >= 4) {
                        err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
                        err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
                }

                err_printf(m, "Cursor [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }

        for (i = 0; i < error->num_transcoders; i++) {
                err_printf(m, "CPU transcoder: %s\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
                err_printf(m, "  Power: %s\n",
                           onoff(error->transcoder[i].power_domain_on));
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
                err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
                err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
                err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
                err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
        }
}

#endif