intel_display.c 448 KB

[14,308 lines of source not shown]
430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
5731157321573315734157351573615737157381573915740157411574215743157441574515746157471574815749157501575115752157531575415755157561575715758157591576015761157621576315764157651576615767157681576915770157711577215773157741577515776157771577815779157801578115782157831578415785157861578715788157891579015791157921579315794157951579615797157981579915800158011580215803158041580515806158071580815809158101581115812158131581415815158161581715818158191582015821158221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

static bool is_mmio_work(struct intel_flip_work *work)
{
        return work->mmio_work.func;
}

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n,
                                         struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
                                    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        struct {
                int dot_limit;
                int p2_slow, p2_fast;
        } p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

        /* Obtain SKU information */
        mutex_lock(&dev_priv->sb_lock);
        hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
                CCK_FUSE_HPLL_FREQ_MASK;
        mutex_unlock(&dev_priv->sb_lock);

        return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq)
{
        u32 val;
        int divider;

        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, reg);
        mutex_unlock(&dev_priv->sb_lock);

        divider = val & CCK_FREQUENCY_VALUES;

        WARN((val & CCK_FREQUENCY_STATUS) !=
             (divider << CCK_FREQUENCY_STATUS_SHIFT),
             "%s change in progress\n", name);

        return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
                           const char *name, u32 reg)
{
        if (dev_priv->hpll_freq == 0)
                dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

        return vlv_get_cck_clock(dev_priv, name, reg,
                                 dev_priv->hpll_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
        if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
                return;

        dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
                                                      CCK_CZ_CLOCK_CONTROL);

        DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
                    const struct intel_crtc_state *pipe_config)
{
        if (HAS_DDI(dev_priv))
                return pipe_config->port_clock; /* SPLL */
        else if (IS_GEN5(dev_priv))
                return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
        else
                return 270000;
}

static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3 },
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};

static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
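/*
 * Illustrative reading of the +2 offsets above (example values, not from
 * the hardware spec): a stored .n of 1 models an actual N divider of 3,
 * so the ironlake .n range { .min = 1, .max = 5 } below describes real
 * divider values 3..7, and likewise for m1/m2.
 */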
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have. The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have. The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5 },
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
        return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}
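/*
 * Worked example for the i9xx formulas above, with illustrative values
 * chosen to satisfy intel_limits_i9xx_sdvo (not taken from any spec):
 * refclk = 96000 kHz, m1 = 12, m2 = 5, n = 2, p1 = 2, p2 = 10 gives
 * m = 5 * (12 + 2) + (5 + 2) = 77 and p = 20, hence
 * vco = 96000 * 77 / (2 + 2) = 1848000 kHz and
 * dot = 1848000 / 20 = 92400 kHz, i.e. a ~92.4 MHz pixel clock.
 */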
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
                                           clock->n << 22);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot / 5;
}
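/*
 * Note on the << 22 above: on CHV, m2 is stored in 22.22 fixed point, so
 * chv_calc_dpll_params() divides by n << 22 to cancel the scale factor.
 * Illustrative arithmetic (values assumed, not from the spec): with
 * refclk = 19200 kHz, n = 1, m1 = 2 and m2 = 140 << 22 (i.e. 140.0),
 * vco = 19200 * 2 * (140 << 22) / (1 << 22) = 5376000 kHz, which sits
 * inside the 4800000..6480000 kHz vco window of intel_limits_chv.
 */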
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n < limit->n.min || limit->n.max < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_GEN9_LP(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
                   const struct intel_crtc_state *crtc_state,
                   int target)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                /*
                 * For LVDS just rely on its current settings for dual-channel.
                 * We haven't figured out how to reliably set up different
                 * single/dual channel state, if we even can.
                 */
                if (intel_is_dual_link_lvds(dev))
                        return limit->p2.p2_fast;
                else
                        return limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
                        return limit->p2.p2_slow;
                else
                        return limit->p2.p2_fast;
        }
}
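/*
 * Example of the p2 selection above: with intel_limits_i9xx_sdvo
 * (dot_limit = 200000, p2_slow = 10, p2_fast = 5), a 148500 kHz target
 * is below the dot limit and selects p2 = 10, while a 270000 kHz target
 * selects p2 = 5. For LVDS the choice instead follows the current
 * single/dual channel configuration.
 */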
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                     clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target;

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                     clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n for precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
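/*
 * The err_most seed in g4x_find_best_dpll() is (target >> 8) + (target >> 9)
 * = target * (1/256 + 1/512), hence the "approximately 0.00585" comment.
 * For example, target = 270000 kHz gives 1054 + 527 = 1581 kHz, so only
 * candidates within ~0.59% of the target are ever accepted.
 */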
/*
 * Check whether the calculated PLL configuration is better than the best
 * configuration and error found so far. Returns true if it is, and stores
 * the calculated error in @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
                               const struct dpll *calculated_clock,
                               const struct dpll *best_clock,
                               unsigned int best_error_ppm,
                               unsigned int *error_ppm)
{
        /*
         * For CHV ignore the error and consider only the P value.
         * Prefer a bigger P value based on HW requirements.
         */
        if (IS_CHERRYVIEW(to_i915(dev))) {
                *error_ppm = 0;

                return calculated_clock->p > best_clock->p;
        }

        if (WARN_ON_ONCE(!target_freq))
                return false;

        *error_ppm = div_u64(1000000ULL *
                             abs(target_freq - calculated_clock->dot),
                             target_freq);
        /*
         * Prefer a better P value over a better (smaller) error if the error
         * is small. Ensure this preference for future configurations too by
         * setting the error to 0.
         */
        if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
                *error_ppm = 0;

                return true;
        }

        return *error_ppm + 10 < best_error_ppm;
}
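/*
 * Worked example for the ppm math above (illustrative numbers): with
 * target_freq = 148500 and a calculated dot of 148650, the error is
 * 1000000 * 150 / 148500 ~= 1010 ppm. Since that is >= 100 ppm the
 * bigger-P shortcut does not apply, and the candidate only wins if
 * 1010 + 10 is still below the best error found so far.
 */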
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
        bool found = false;

        target *= 5; /* fast clock */

        memset(best_clock, 0, sizeof(*best_clock));

        /* based on hardware requirement, prefer smaller n for precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                                clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                                        unsigned int ppm;

                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);

                                        vlv_calc_dpll_params(refclk, &clock);

                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;

                                        if (!vlv_PLL_is_optimal(dev, target,
                                                                &clock,
                                                                best_clock,
                                                                bestppm, &ppm))
                                                continue;

                                        *best_clock = clock;
                                        bestppm = ppm;
                                        found = true;
                                }
                        }
                }
        }

        return found;
}
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        unsigned int best_error_ppm;
        struct dpll clock;
        uint64_t m2;
        bool found = false;

        memset(best_clock, 0, sizeof(*best_clock));
        best_error_ppm = 1000000;

        /*
         * Based on the hardware doc, n is always set to 1 and m1 is always
         * set to 2. If we ever need to support a 200 MHz refclk, this needs
         * revisiting because n may no longer be 1.
         */
        clock.n = 1, clock.m1 = 2;
        target *= 5;    /* fast clock */

        for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                for (clock.p2 = limit->p2.p2_fast;
                     clock.p2 >= limit->p2.p2_slow;
                     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                        unsigned int error_ppm;

                        clock.p = clock.p1 * clock.p2;

                        m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
                                                    clock.n) << 22, refclk * clock.m1);

                        if (m2 > INT_MAX/clock.m1)
                                continue;

                        clock.m2 = m2;

                        chv_calc_dpll_params(refclk, &clock);

                        if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
                                continue;

                        if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
                                                best_error_ppm, &error_ppm))
                                continue;

                        *best_clock = clock;
                        best_error_ppm = error_ppm;
                        found = true;
                }
        }

        return found;
}
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
                        struct dpll *best_clock)
{
        int refclk = 100000;
        const struct intel_limit *limit = &intel_limits_bxt;

        return chv_find_best_dpll(limit, crtc_state,
                                  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
        /* Be paranoid as we can arrive here with only partial
         * state retrieved from the hardware during setup.
         *
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
         * We can ditch the crtc->primary->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
         * crtc->state->active once we have proper CRTC states wired up
         * for atomic.
         */
        return crtc->active && crtc->base.primary->state->fb &&
                crtc->config->base.adjusted_mode.crtc_clock;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
                                             enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        return crtc->config->cpu_transcoder;
}
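/*
 * pipe_dsl_stopped() below samples the pipe's display scanline counter
 * (PIPEDSL) twice, 5 ms apart. If the masked scanline value has not
 * moved between the two reads, scanout on that pipe has stopped.
 */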
static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
        u32 line_mask;

        if (IS_GEN2(dev_priv))
                line_mask = DSL_LINEMASK_GEN2;
        else
                line_mask = DSL_LINEMASK_GEN3;

        line1 = I915_READ(reg) & line_mask;
        msleep(5);
        line2 = I915_READ(reg) & line_mask;

        return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pipe = crtc->pipe;

        if (INTEL_GEN(dev_priv) >= 4) {
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_wait_for_register(dev_priv,
                                            reg, I965_PIPECONF_ACTIVE, 0,
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                /* Wait for the display line to settle */
                if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
                        WARN(1, "pipe_off wait timed out\n");
        }
}
/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(DPLL(pipe));
        cur_state = !!(val & DPLL_VCO_ENABLE);
        I915_STATE_WARN(cur_state != state,
                        "PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        mutex_unlock(&dev_priv->sb_lock);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
                        "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
                        "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_ENABLE);
        I915_STATE_WARN(cur_state != state,
                        "FDI RX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN5(dev_priv))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
                       enum pipe pipe, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(FDI_RX_CTL(pipe));
        cur_state = !!(val & FDI_RX_PLL_ENABLE);
        I915_STATE_WARN(cur_state != state,
                        "FDI RX PLL assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = PIPE_A;
        bool locked = true;

        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                if (port_sel == PANEL_PORT_SELECT_LVDS &&
                    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
                        panel_pipe = PIPE_B;
                /* XXX: else fix for eDP */
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                pp_reg = PP_CONTROL(0);
                if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
                        panel_pipe = PIPE_B;
        }

        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
                        "panel assertion failure, pipe %c regs locked\n",
                        pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;

        if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
                cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
        else
                cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

        I915_STATE_WARN(cur_state != state,
                        "cursor on pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;

        /* if we need the pipe quirk it must be always on */
        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);
                intel_display_power_put(dev_priv, power_domain);
        } else {
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
                        "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
                         enum plane plane, bool state)
{
        u32 val;
        bool cur_state;

        val = I915_READ(DSPCNTR(plane));
        cur_state = !!(val & DISPLAY_PLANE_ENABLE);
        I915_STATE_WARN(cur_state != state,
                        "plane %c assertion failure (expected %s, current %s)\n",
                        plane_name(plane), onoff(state), onoff(cur_state));
}
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe)
{
        int i;

        /* Primary planes are fixed to pipes on gen4+ */
        if (INTEL_GEN(dev_priv) >= 4) {
                u32 val = I915_READ(DSPCNTR(pipe));
                I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
                                "plane %c assertion failure, should be disabled but not\n",
                                plane_name(pipe));
                return;
        }

        /* Need to check both planes against the pipe */
        for_each_pipe(dev_priv, i) {
                u32 val = I915_READ(DSPCNTR(i));
                enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;
                I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
                                "plane %c assertion failure, should be off on pipe %c but is still active\n",
                                plane_name(i), pipe_name(pipe));
        }
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        int sprite;

        if (INTEL_GEN(dev_priv) >= 9) {
                for_each_sprite(dev_priv, pipe, sprite) {
                        u32 val = I915_READ(PLANE_CTL(pipe, sprite));
                        I915_STATE_WARN(val & PLANE_CTL_ENABLE,
                                        "plane %d assertion failure, should be off on pipe %c but is still active\n",
                                        sprite, pipe_name(pipe));
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                for_each_sprite(dev_priv, pipe, sprite) {
                        u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
                        I915_STATE_WARN(val & SP_ENABLE,
                                        "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                                        sprite_name(pipe, sprite), pipe_name(pipe));
                }
        } else if (INTEL_GEN(dev_priv) >= 7) {
                u32 val = I915_READ(SPRCTL(pipe));
                I915_STATE_WARN(val & SPRITE_ENABLE,
                                "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                                plane_name(pipe), pipe_name(pipe));
        } else if (INTEL_GEN(dev_priv) >= 5) {
                u32 val = I915_READ(DVSCNTR(pipe));
                I915_STATE_WARN(val & DVS_ENABLE,
                                "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                                plane_name(pipe), pipe_name(pipe));
        }
}
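/*
 * assert_vblank_disabled() below expects drm_crtc_vblank_get() to fail
 * once vblanks have been shut off for the crtc; if the call unexpectedly
 * succeeds (returns 0), vblanks are still enabled, so it warns and drops
 * the reference it just acquired.
 */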
  1126. static void assert_vblank_disabled(struct drm_crtc *crtc)
  1127. {
  1128. if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
  1129. drm_crtc_vblank_put(crtc);
  1130. }

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Re-enable the 10bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
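
/*
 * The DVO 2x clock on i830 is shared between both DPLLs (see
 * i9xx_enable_pll() and i9xx_disable_pll() below), so the callers need
 * to know how many active pipes are driving DVO outputs before touching
 * the shared DPLL_DVO_2X_MODE bit.
 */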
static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		count += crtc->base.state->active &&
			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
	}

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose PLL is to be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
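		/*
		 * Port C's ready bits appear to live 4 bits above port
		 * B's in DPLL(0) (compare DPLL_PORTB_READY_MASK with
		 * DPLL_PORTC_READY_MASK), hence shifting the caller's
		 * expected value up to line up with the port C field.
		 */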
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	WARN_ON(!crtc->config->has_pch_encoder);

	if (HAS_PCH_LPT(dev_priv))
		return TRANSCODER_A;
	else
		return (enum transcoder) crtc->pipe;
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  (enum pipe) intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which also messes up the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on().
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}

static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN2(dev_priv) ? 2048 : 4096;
}

static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_NONE:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}

static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int plane)
{
	if (fb->modifier == DRM_FORMAT_MOD_NONE)
		return 1;
	else
		return intel_tile_size(to_i915(fb->dev)) /
			intel_tile_width_bytes(fb, plane);
}

/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
			    unsigned int *tile_width,
			    unsigned int *tile_height)
{
	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
	unsigned int cpp = fb->format->cpp[plane];

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
}
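
/*
 * Worked example for the helpers above: an X-tiled fb with cpp = 4 on
 * non-gen2 hardware has tile_width_bytes = 512 and tile_size = 4096,
 * so intel_tile_dims() yields a 128x8 tile in pixel units
 * (512 / 4 = 128 pixels wide, 4096 / 512 = 8 rows tall).
 */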

unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, plane);

	return ALIGN(height, tile_height);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size;
}

static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	view->type = I915_GGTT_VIEW_NORMAL;
	if (drm_rotation_90_or_270(rotation)) {
		view->type = I915_GGTT_VIEW_ROTATED;
		view->rotated = to_intel_framebuffer(fb)->rot_info;
	}
}

static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_INFO(dev_priv)->gen >= 4)
		return 4 * 1024;
	else
		return 0;
}

static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_NONE:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
	if (IS_ERR(vma))
		goto err;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		if (i915_vma_get_fence(vma) == 0)
			i915_vma_pin_fence(vma);
	}

	i915_vma_get(vma);
err:
	intel_runtime_pm_put(dev_priv);
	return vma;
}

void intel_unpin_fb_vma(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}

static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
			  unsigned int rotation)
{
	if (drm_rotation_90_or_270(rotation))
		return to_intel_framebuffer(fb)->rotated[plane].pitch;
	else
		return fb->pitches[plane];
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int plane)
{
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int cpp = fb->format->cpp[plane];
	unsigned int pitch = fb->pitches[plane];

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int plane)
{
	const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
	unsigned int rotation = state->base.rotation;

	if (drm_rotation_90_or_270(rotation)) {
		*x += intel_fb->rotated[plane].x;
		*y += intel_fb->rotated[plane].y;
	} else {
		*x += intel_fb->normal[plane].x;
		*y += intel_fb->normal[plane].y;
	}
}

/*
 * Input tile dimensions and pitch must already be
 * rotated to match x and y, and in pixel units.
 */
static u32 _intel_adjust_tile_offset(int *x, int *y,
				     unsigned int tile_width,
				     unsigned int tile_height,
				     unsigned int tile_size,
				     unsigned int pitch_tiles,
				     u32 old_offset,
				     u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
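
/*
 * Worked example for _intel_adjust_tile_offset(), assuming 128x8 pixel
 * tiles (tile_size = 4096) and pitch_tiles = 4: rebasing a pixel from
 * old_offset = 5 * 4096 to new_offset = 0 gives tiles = 5, so x grows
 * by 5 % 4 * 128 = 128 pixels and y by 5 / 4 * 8 = 8 lines. Since
 * 128 < pitch_pixels (512), the final minimization step leaves x/y
 * unchanged.
 */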

/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    const struct intel_plane_state *state, int plane,
				    u32 old_offset, u32 new_offset)
{
	const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int cpp = fb->format->cpp[plane];
	unsigned int rotation = state->base.rotation;
	unsigned int pitch = intel_fb_pitch(fb, plane, rotation);

	WARN_ON(new_offset > old_offset);

	if (fb->modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		_intel_adjust_tile_offset(x, y, tile_width, tile_height,
					  tile_size, pitch_tiles,
					  old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}

/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
				      int *x, int *y,
				      const struct drm_framebuffer *fb, int plane,
				      unsigned int pitch,
				      unsigned int rotation,
				      u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = fb->format->cpp[plane];
	u32 offset, offset_aligned;

	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		_intel_adjust_tile_offset(x, y, tile_width, tile_height,
					  tile_size, pitch_tiles,
					  offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
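
/*
 * Worked example for the tiled branch above, assuming X-tiling with
 * cpp = 4 (128x8 pixel tiles, tile_size = 4096), a 2048 byte pitch
 * (pitch_tiles = 4) and 4k alignment: for (*x, *y) = (200, 10) we get
 * tile_rows = 1 and tiles = 1, so offset = (1 * 4 + 1) * 4096 = 20480,
 * which is already aligned, with the residual (*x, *y) = (72, 2)
 * pointing into that tile.
 */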

u32 intel_compute_tile_offset(int *x, int *y,
			      const struct intel_plane_state *state,
			      int plane)
{
	const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
	const struct drm_framebuffer *fb = state->base.fb;
	unsigned int rotation = state->base.rotation;
	int pitch = intel_fb_pitch(fb, plane, rotation);
	u32 alignment = intel_surf_alignment(fb, plane);

	return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
					  rotation, alignment);
}

/* Convert the fb->offset[] linear offset into x/y offsets */
static void intel_fb_offset_to_xy(int *x, int *y,
				  const struct drm_framebuffer *fb, int plane)
{
	unsigned int cpp = fb->format->cpp[plane];
	unsigned int pitch = fb->pitches[plane];
	u32 linear_offset = fb->offsets[plane];

	*y = linear_offset / pitch;
	*x = linear_offset % pitch / cpp;
}
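
/*
 * For example, with pitch = 4096 bytes and cpp = 4, a linear
 * fb->offsets[] value of 8256 decomposes into y = 8256 / 4096 = 2
 * and x = (8256 % 4096) / 4 = 16.
 */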

static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		intel_fb_offset_to_xy(&x, &y, fb, i);

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i915_gem_object_is_tiled(intel_fb->obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		offset = _intel_compute_tile_offset(dev_priv, &x, &y,
						    fb, i, fb->pitches[i],
						    DRM_ROTATE_0, tile_size);
		offset /= tile_size;

		if (fb->modifier != DRM_FORMAT_MOD_NONE) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;

			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			_intel_adjust_tile_offset(&x, &y,
						  tile_width, tile_height,
						  tile_size, pitch_tiles,
						  gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (max_size * tile_size > intel_fb->obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
			      max_size * tile_size, intel_fb->obj->base.size);
		return -EINVAL;
	}

	return 0;
}

static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	if (plane_config->tiling == I915_TILING_X)
		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	i915_gem_object_put(obj);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
	if (plane->fb == plane->state->fb)
		return;

	if (plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);
	plane->state->fb = plane->fb;
	if (plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);
}

static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			struct intel_plane_state *plane_state,
			bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	plane_state->base.visible = visible;

	/* FIXME pre-g4x doesn't work like this */
	if (visible) {
		crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
		crtc_state->active_planes |= BIT(plane->id);
	} else {
		crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
		crtc_state->active_planes &= ~BIT(plane->id);
	}

	DRM_DEBUG_KMS("%s active planes 0x%x\n",
		      crtc_state->base.crtc->name,
		      crtc_state->active_planes);
}

static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = c->primary->fb;
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_set_plane_visible(to_intel_crtc_state(crtc_state),
				to_intel_plane_state(plane_state),
				false);
	intel_pre_disable_primary_noatomic(&intel_crtc->base);
	trace_intel_disable_plane(primary, intel_crtc);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_unreference(fb);
		return;
	}

	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	obj = intel_fb_obj(fb);
	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;

	intel_set_plane_visible(to_intel_crtc_state(crtc_state),
				to_intel_plane_state(plane_state),
				true);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}

static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
			       unsigned int rotation)
{
	int cpp = fb->format->cpp[plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		switch (cpp) {
		case 8:
			return 4096;
		case 4:
		case 2:
		case 1:
			return 8192;
		default:
			MISSING_CASE(cpp);
			break;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 8:
			return 2048;
		case 4:
			return 4096;
		case 2:
		case 1:
			return 8192;
		default:
			MISSING_CASE(cpp);
			break;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
	}

	return 2048;
}

static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->aux.offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
						  offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested.
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		while ((x + w) * cpp > fb->pitches[0]) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
				return -EINVAL;
			}
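			/*
			 * Step the surface offset back one alignment unit
			 * at a time, folding the difference into the x/y
			 * offsets, until x + w fits within the stride.
			 */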
			offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
							  offset, offset - alignment);
		}
	}

	plane_state->main.offset = offset;
	plane_state->main.x = x;
	plane_state->main.y = y;

	return 0;
}

static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
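	/*
	 * The plane src rectangle is in 16.16 fixed point; shifting by
	 * 17 instead of 16 additionally halves the coordinates for the
	 * 2x2 subsampled CbCr plane of NV12.
	 */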
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_compute_tile_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->aux.offset = offset;
	plane_state->aux.x = x;
	plane_state->aux.y = y;

	return 0;
}

int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (fb->format->format == DRM_FORMAT_NV12) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		plane_state->aux.offset = ~0xfff;
		plane_state->aux.x = 0;
		plane_state->aux.y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}

static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
	    IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
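
	/*
	 * Before gen4 the primary planes are not fixed to a pipe (see
	 * assert_planes_disabled() above), so the plane must be told
	 * which pipe to scan out from; pipe A appears to be the default
	 * field value, so only pipe B needs an explicit select.
	 */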
	if (INTEL_GEN(dev_priv) < 4) {
		if (crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;
	}

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}

static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(primary->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	unsigned long irqflags;

	intel_add_fb_offsets(&x, &y, plane_state, 0);

	if (INTEL_GEN(dev_priv) >= 4)
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(&x, &y, plane_state, 0);

	if (rotation & DRM_ROTATE_180) {
		x += crtc_state->pipe_src_w - 1;
		y += crtc_state->pipe_src_h - 1;
	} else if (rotation & DRM_REFLECT_X) {
		x += crtc_state->pipe_src_w - 1;
	}

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	if (INTEL_GEN(dev_priv) < 4)
		intel_crtc->dspaddr_offset = linear_offset;

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
		I915_WRITE_FW(PRIMSIZE(plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPSURF(plane),
			      intel_plane_ggtt_offset(plane_state) +
			      intel_crtc->dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
	} else {
		I915_WRITE_FW(DSPADDR(plane),
			      intel_plane_ggtt_offset(plane_state) +
			      intel_crtc->dspaddr_offset);
	}
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void i9xx_disable_primary_plane(struct drm_plane *primary,
				       struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(plane), 0);
	if (INTEL_INFO(dev_priv)->gen >= 4)
		I915_WRITE_FW(DSPSURF(plane), 0);
	else
		I915_WRITE_FW(DSPADDR(plane), 0);
	POSTING_READ_FW(DSPCNTR(plane));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	unsigned long irqflags;

	intel_add_fb_offsets(&x, &y, plane_state, 0);

	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(&x, &y, plane_state, 0);

	/* HSW+ does this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
	    rotation & DRM_ROTATE_180) {
		x += crtc_state->pipe_src_w - 1;
		y += crtc_state->pipe_src_h - 1;
	}

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE_FW(DSPSURF(plane),
		      intel_plane_ggtt_offset(plane_state) +
		      intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
{
	if (fb->modifier == DRM_FORMAT_MOD_NONE)
		return 64;
	else
		return intel_tile_width_bytes(fb, plane);
}

static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}

/*
 * This function detaches (i.e. unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
	struct intel_crtc_scaler_state *scaler_state;
	int i;

	scaler_state = &intel_crtc->config->scaler_state;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
		     unsigned int rotation)
{
	u32 stride;

	if (plane >= fb->format->num_planes)
		return 0;

	stride = intel_fb_pitch(fb, plane, rotation);

	/*
	 * The stride is expressed either in chunks of 64 bytes for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (drm_rotation_90_or_270(rotation))
		stride /= intel_tile_height(fb, plane);
	else
		stride /= intel_fb_stride_alignment(fb, plane);

	return stride;
}
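
/*
 * Illustrative example (numbers chosen for this note, not from the
 * original source): a linear fb with a 7680 byte pitch programs a
 * stride of 7680 / 64 = 120 chunks, while the same pitch on an
 * X-tiled fb (512 byte wide tiles) programs 7680 / 512 = 15 tiles.
 */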

static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

static u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case DRM_ROTATE_0:
		break;
	/*
	 * DRM_ROTATE_ rotations are counter-clockwise, to stay compatible
	 * with Xrandr, while i915 HW rotation is clockwise; that's why the
	 * 90 and 270 cases are swapped here.
	 */
	case DRM_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}

u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (!IS_GEMINILAKE(dev_priv)) {
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}

static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	enum plane_id plane_id = to_intel_plane(plane)->id;
	enum pipe pipe = to_intel_plane(plane)->pipe;
	u32 plane_ctl = plane_state->ctl;
	unsigned int rotation = plane_state->base.rotation;
	u32 stride = skl_plane_stride(fb, 0, rotation);
	u32 surf_addr = plane_state->main.offset;
	int scaler_id = plane_state->scaler_id;
	int src_x = plane_state->main.x;
	int src_y = plane_state->main.y;
	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
	int src_h = drm_rect_height(&plane_state->base.src) >> 16;
	int dst_x = plane_state->base.dst.x1;
	int dst_y = plane_state->base.dst.y1;
	int dst_w = drm_rect_width(&plane_state->base.dst);
	int dst_h = drm_rect_height(&plane_state->base.dst);
	unsigned long irqflags;

	/* Sizes are 0 based */
	src_w--;
	src_h--;
	dst_w--;
	dst_h--;

	intel_crtc->dspaddr_offset = surf_addr;

	intel_crtc->adjusted_x = src_x;
	intel_crtc->adjusted_y = src_y;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_GEMINILAKE(dev_priv)) {
		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
			      PLANE_COLOR_PIPE_GAMMA_ENABLE |
			      PLANE_COLOR_PIPE_CSC_ENABLE |
			      PLANE_COLOR_PLANE_GAMMA_DISABLE);
	}

	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
	} else {
		I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
	}

	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
		      intel_plane_ggtt_offset(plane_state) + surf_addr);

	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void skylake_disable_primary_plane(struct drm_plane *primary,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum plane_id plane_id = to_intel_plane(primary)->id;
	enum pipe pipe = to_intel_plane(primary)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}

static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}

static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible) {
			trace_intel_update_plane(&plane->base,
						 to_intel_crtc(crtc));

			plane->update_plane(&plane->base,
					    to_intel_crtc_state(crtc->state),
					    plane_state);
		}
	}
}

static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
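
/*
 * On pre-gen5 platforms (other than g4x) a full GPU reset also takes
 * down the display engine, so display state has to be saved before the
 * reset and restored afterwards; that is what the check below encodes.
 */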
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return intel_has_gpu_reset(dev_priv) &&
	       INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
}

void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}

	/* reset doesn't touch the display, but flips might get nuked anyway */
	if (!i915.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}

void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	int ret;

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev_priv);

	dev_priv->modeset_restore_state = NULL;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		if (!state) {
			/*
			 * Flips in the rings have been nuked by the reset,
			 * so update the base address of all primary
			 * planes to the last fb to make sure we're
			 * showing the correct fb after a reset.
			 *
			 * FIXME: Atomic will make this obsolete since we won't schedule
			 * CS-based flips (which might get lost in gpu resets) any more.
			 */
			intel_update_primary_planes(dev);
		} else {
			ret = __intel_display_resume(dev, state, ctx);
			if (ret)
				DRM_ERROR("Restoring old state failed with %i\n", ret);
		}
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	if (state)
		drm_atomic_state_put(state);
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);
}

static bool abort_flip_on_reset(struct intel_crtc *crtc)
{
	struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;

	if (i915_reset_backoff(error))
		return true;

	if (crtc->reset_count != i915_reset_count(error))
		return true;

	return false;
}

static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (abort_flip_on_reset(intel_crtc))
		return false;

	spin_lock_irq(&dev->event_lock);
	pending = to_intel_crtc(crtc)->flip_work != NULL;
	spin_unlock_irq(&dev->event_lock);

	return pending;
}

static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}

static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}
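
/*
 * Voltage swing / pre-emphasis combinations, tried in this order by the
 * SNB/IVB FDI training loops below until the receiver reports lock.
 */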
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}

static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->flip_work)
			intel_wait_for_vblank(dev_priv, crtc->pipe);

		return true;
	}

	return false;
}

static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);

	queue_work(dev_priv->wq, &work->unpin_work);
}

static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to kHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}
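
	/*
	 * Worked example (illustrative numbers): for a 108000 kHz clock and
	 * auxdiv == 0, desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000)
	 * = 1600, giving divsel = 1600 / 64 - 2 = 23 and phaseinc =
	 * 1600 % 64 = 0, which fits in the 7-bit divisor field.
	 */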

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);
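
	/*
	 * Invert the math in lpt_program_iclkip(): reconstruct the divisor
	 * from divsel/phaseinc and derive the clock in kHz from the
	 * 172.8 MHz virtual root.
	 */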
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}

static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
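
/*
 * The FDI B lanes are shared with FDI C: pipe B may use more than two
 * lanes only with bifurcation disabled, while any use of FDI C requires
 * it enabled, as the pipe switch below encodes.
 */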
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);
		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);
		break;
	default:
		BUG();
	}
}

/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	return -1;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n",
				  pipe_name(pipe));
	}
}

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	need_scaling = drm_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h) :
		(src_w != dst_w || src_h != dst_h);

	/*
	 * If the plane is being disabled, the scaler is no longer required,
	 * or we are force detaching:
	 * - free the scaler bound to this plane/crtc
	 * - in order to do this, update crtc->scaler_usage
	 *
	 * Here the scaler state in crtc_state is set free so that
	 * the scaler can be assigned to another user. The actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	    dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}

/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's scaler state
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, DRM_ROTATE_0,
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;
	bool force_detach = !fb || !plane_state->base.visible;

	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->base.src) >> 16,
				drm_rect_height(&plane_state->base.src) >> 16,
				drm_rect_width(&plane_state->base.dst),
				drm_rect_height(&plane_state->base.dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
static void skylake_scaler_disable(struct intel_crtc *crtc)
{
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	if (crtc->config->pch_pfit.enabled) {
		int id;

		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
			return;

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
	}
}

static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/*
		 * Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
				   PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a
	 * vblank. This function is called from post_plane_update, which is
	 * run after a vblank wait.
	 */
	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/*
		 * Quoting Art Runyan: "it's not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/*
		 * The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable,
		 * then the HW state readout code will complain that the
		 * expected IPS_CTL value is not the one we read.
		 */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
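
/*
 * As the function above shows, IPS is enabled through two different
 * mechanisms: on Broadwell via the pcode mailbox, whose IPS_CTL readback is
 * unreliable and therefore not polled, and on Haswell via a direct IPS_CTL
 * MMIO write, where the enable bit is only reflected after the next vblank
 * and so is polled with a 50 ms timeout.
 */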
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = to_i915(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/*
	 * Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS. Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_pre_disable_primary(crtc);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_post_update(crtc);

		if (primary_state->base.visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->base.visible))
			intel_post_enable_primary(&crtc->base);
	}
}
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_pre_update(crtc, pipe_config, primary_state);

		if (old_primary_state->base.visible &&
		    (modeset || !primary_state->base.visible))
			intel_pre_disable_primary(&crtc->base);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done. No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (modeset)
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks. For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
					  struct intel_crtc_state *crtc_state,
					  struct drm_atomic_state *old_state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct drm_crtc *crtc,
				      struct intel_crtc_state *crtc_state,
				      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct drm_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		encoder->enable(encoder, crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct drm_crtc *crtc,
				   struct intel_crtc_state *old_crtc_state,
				   struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct drm_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
					    struct intel_crtc_state *old_crtc_state,
					    struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
	}
}
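
/*
 * The helpers above invoke the optional encoder hooks in a fixed order around
 * the crtc enable/disable sequence:
 *
 *   enable:  pre_pll_enable -> pre_enable -> enable
 *   disable: disable -> post_disable -> post_pll_disable
 *
 * Only ->enable and ->disable are called unconditionally; the others are
 * skipped when the encoder does not provide them.
 */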
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we occasionally get spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/*
		 * Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling.
		 */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev_priv, pipe);
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(intel_crtc, pipe_config);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_pipe_clock(pipe_config);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(pipe_config);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/*
	 * If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround.
	 */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	/*
	 * To avoid upsetting the power well on haswell only disable the pfit
	 * if it's in use. The hw state code will make sure we get this right.
	 */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(intel_crtc);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_pipe_clock(intel_crtc->config);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/*
	 * Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging.
	 */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
static u64 get_crtc_power_domains(struct drm_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	/* mask is u64, so consistently use BIT_ULL for all domain bits */
	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_PLLS);

	return mask;
}
static u64
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc, crtc_state);

	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}

static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
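
/*
 * Typical usage of the two helpers above during a modeset: the caller grabs
 * references for the crtc's *new* domains via
 * modeset_get_crtc_power_domains() before reprogramming the hardware, and
 * only afterwards releases the domains that are no longer needed (the
 * returned old & ~new mask) with modeset_put_power_domains(). That ordering
 * keeps every required power well up across the transition.
 */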
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, intel_crtc->config);
		chv_enable_pll(intel_crtc, intel_crtc->config);
	} else {
		vlv_prepare_pll(intel_crtc, intel_crtc->config);
		vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	dev_priv->display.initial_watermarks(old_intel_state,
					     pipe_config);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(intel_crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);
}
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	if (crtc->primary->state->visible) {
		WARN_ON(intel_crtc->flip_work);

		intel_pre_disable_primary_noatomic(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		crtc->primary->state->visible = false;
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory\n",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
/*
 * Turn all CRTCs off, but do not adjust state. This has to be paired with a
 * call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency).
 */
static void intel_connector_verify_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
			"connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
			"connector is active, but attached crtc isn't\n");

		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
int intel_connector_init(struct intel_connector *connector)
{
	drm_atomic_helper_connector_reset(&connector->base);

	if (!connector->base.state)
		return -ENOMEM;

	return 0;
}

struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}
/*
 * Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector.
 */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
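
/*
 * Summary of the IVB three-pipe FDI constraints checked above: each pipe may
 * use at most 4 lanes, pipe A has a dedicated link, and pipes B and C share
 * lanes -- B may take more than 2 lanes only while C is unused, and C is
 * capped at 2 lanes and only usable while B itself needs no more than 2.
 */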
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/*
	 * FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
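
/*
 * Rough worked example for the lane computation above (illustrative numbers,
 * ignoring any spread-spectrum margin the lane helper may add): a
 * 1920x1080@60 mode has a ~148500 kHz dot clock; at 24 bpp that is
 * 148500 * 24 = 3,564,000 kbit/s of pixel data, while one FDI lane at the
 * usual 270000 kHz link clock carries 270000 * 8 = 2,160,000 kbit/s, so two
 * lanes are required. If the lane check then fails, the retry loop lowers
 * pipe_bpp by 2 bits per component (6 per pixel) and recomputes.
 */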
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
				     struct intel_crtc_state *pipe_config)
{
	if (pipe_config->pipe_bpp > 24)
		return false;

	/* HSW can handle pixel rate up to cdclk? */
	if (IS_HASWELL(dev_priv))
		return true;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	return pipe_config->pixel_rate <=
		dev_priv->max_cdclk_freq * 95 / 100;
}

static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	pipe_config->ips_enabled = i915.enable_ips &&
		hsw_crtc_supports_ips(crtc) &&
		pipe_config_supports_ips(dev_priv, pipe_config);
}
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return INTEL_INFO(dev_priv)->gen < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */
	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
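
/*
 * Worked example for the panel fitter adjustment above (illustrative
 * numbers): downscaling a 1920x1200 source into a 1600x1200 pfit window
 * scales the effective pixel rate by (1920 * 1200) / (1600 * 1200) = 1.2,
 * since the pipe must fetch more source pixels per output pixel. For
 * upscaling the pipe dimensions are clamped up to the pfit window first, so
 * the factor never drops below 1.
 */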
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (HAS_GMCH_DISPLAY(dev_priv))
		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
		crtc_state->pixel_rate =
			crtc_state->base.adjusted_mode.crtc_clock;
	else
		crtc_state->pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}
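
	/*
	 * Illustrative numbers for the check above (not taken from any
	 * particular platform): with a 320000 kHz cdclk the single-wide
	 * limit is 288000 kHz, so a ~267000 kHz mode such as 2048x1536@60
	 * still fits, while anything faster needs double wide (when the
	 * pipe supports it) and is then limited only by max_dotclk_freq.
	 */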
	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (HAS_IPS(dev_priv))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}
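
/*
 * Worked example for the M/N computation above (illustrative numbers): a
 * 4-lane link at 270000 kHz moving 24 bpp pixels at 148500 kHz yields
 * link_m/link_n = 148500 / 270000 = 0.55 and
 * gmch_m/gmch_n = (24 * 148500) / (270000 * 4 * 8) = 0.4125.
 * compute_m_n() then picks N as a power of two capped at DATA_LINK_N_MAX,
 * scales M to preserve the ratio, and shifts both down until they fit the
 * DATA_LINK_M_N_MASK register fields.
 */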
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915.panel_use_ssc >= 0)
		return i915.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev_priv)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}
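
/*
 * Note on the divider staging above: fp0 holds the dividers for the normal
 * pixel clock, while fp1 only gets the reduced-clock dividers when an LVDS
 * output actually has a validated downclock mode; otherwise fp1 simply
 * mirrors fp0 and lowfreq_avail stays false.
 */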
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
  5383. static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
  5384. struct intel_link_m_n *m_n,
  5385. struct intel_link_m_n *m2_n2)
  5386. {
  5387. struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  5388. int pipe = crtc->pipe;
  5389. enum transcoder transcoder = crtc->config->cpu_transcoder;
  5390. if (INTEL_GEN(dev_priv) >= 5) {
  5391. I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
  5392. I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
  5393. I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
  5394. I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
  5395. /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
  5396. * for gen < 8) and if DRRS is supported (to make sure the
  5397. * registers are not unnecessarily accessed).
  5398. */
  5399. if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
  5400. INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
  5401. I915_WRITE(PIPE_DATA_M2(transcoder),
  5402. TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
  5403. I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
  5404. I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
  5405. I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
  5406. }
  5407. } else {
  5408. I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
  5409. I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
  5410. I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
  5411. I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
  5412. }
  5413. }
  5414. void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
  5415. {
  5416. struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
  5417. if (m_n == M1_N1) {
  5418. dp_m_n = &crtc->config->dp_m_n;
  5419. dp_m2_n2 = &crtc->config->dp_m2_n2;
  5420. } else if (m_n == M2_N2) {
  5421. /*
  5422. * M2_N2 registers are not supported. Hence m2_n2 divider value
  5423. * needs to be programmed into M1_N1.
  5424. */
  5425. dp_m_n = &crtc->config->dp_m2_n2;
  5426. } else {
  5427. DRM_ERROR("Unsupported divider value\n");
  5428. return;
  5429. }
  5430. if (crtc->config->has_pch_encoder)
  5431. intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
  5432. else
  5433. intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
  5434. }
  5435. static void vlv_compute_dpll(struct intel_crtc *crtc,
  5436. struct intel_crtc_state *pipe_config)
  5437. {
  5438. pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
  5439. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  5440. if (crtc->pipe != PIPE_A)
  5441. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  5442. /* DPLL not used with DSI, but still need the rest set up */
  5443. if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
  5444. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
  5445. DPLL_EXT_BUFFER_ENABLE_VLV;
  5446. pipe_config->dpll_hw_state.dpll_md =
  5447. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  5448. }
  5449. static void chv_compute_dpll(struct intel_crtc *crtc,
  5450. struct intel_crtc_state *pipe_config)
  5451. {
  5452. pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
  5453. DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
  5454. if (crtc->pipe != PIPE_A)
  5455. pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
  5456. /* DPLL not used with DSI, but still need the rest set up */
  5457. if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
  5458. pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
  5459. pipe_config->dpll_hw_state.dpll_md =
  5460. (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
  5461. }
  5462. static void vlv_prepare_pll(struct intel_crtc *crtc,
  5463. const struct intel_crtc_state *pipe_config)
  5464. {
  5465. struct drm_device *dev = crtc->base.dev;
  5466. struct drm_i915_private *dev_priv = to_i915(dev);
  5467. enum pipe pipe = crtc->pipe;
  5468. u32 mdiv;
  5469. u32 bestn, bestm1, bestm2, bestp1, bestp2;
  5470. u32 coreclk, reg_val;
  5471. /* Enable Refclk */
  5472. I915_WRITE(DPLL(pipe),
  5473. pipe_config->dpll_hw_state.dpll &
  5474. ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
  5475. /* No need to actually set up the DPLL with DSI */
  5476. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  5477. return;
  5478. mutex_lock(&dev_priv->sb_lock);
  5479. bestn = pipe_config->dpll.n;
  5480. bestm1 = pipe_config->dpll.m1;
  5481. bestm2 = pipe_config->dpll.m2;
  5482. bestp1 = pipe_config->dpll.p1;
  5483. bestp2 = pipe_config->dpll.p2;
  5484. /* See eDP HDMI DPIO driver vbios notes doc */
  5485. /* PLL B needs special handling */
  5486. if (pipe == PIPE_B)
  5487. vlv_pllb_recal_opamp(dev_priv, pipe);
  5488. /* Set up Tx target for periodic Rcomp update */
  5489. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
  5490. /* Disable target IRef on PLL */
  5491. reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
  5492. reg_val &= 0x00ffffff;
  5493. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
  5494. /* Disable fast lock */
  5495. vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
  5496. /* Set idtafcrecal before PLL is enabled */
  5497. mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
  5498. mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
  5499. mdiv |= ((bestn << DPIO_N_SHIFT));
  5500. mdiv |= (1 << DPIO_K_SHIFT);
  5501. /*
  5502. * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
  5503. * but we don't support that).
  5504. * Note: don't use the DAC post divider as it seems unstable.
  5505. */
  5506. mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
  5507. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  5508. mdiv |= DPIO_ENABLE_CALIBRATION;
  5509. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  5510. /* Set HBR and RBR LPF coefficients */
  5511. if (pipe_config->port_clock == 162000 ||
  5512. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
  5513. intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
  5514. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  5515. 0x009f0003);
  5516. else
  5517. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
  5518. 0x00d0000f);
  5519. if (intel_crtc_has_dp_encoder(pipe_config)) {
  5520. /* Use SSC source */
  5521. if (pipe == PIPE_A)
  5522. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  5523. 0x0df40000);
  5524. else
  5525. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  5526. 0x0df70000);
  5527. } else { /* HDMI or VGA */
  5528. /* Use bend source */
  5529. if (pipe == PIPE_A)
  5530. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  5531. 0x0df70000);
  5532. else
  5533. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
  5534. 0x0df40000);
  5535. }
  5536. coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
  5537. coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
  5538. if (intel_crtc_has_dp_encoder(crtc->config))
  5539. coreclk |= 0x01000000;
  5540. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
  5541. vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
  5542. mutex_unlock(&dev_priv->sb_lock);
  5543. }
  5544. static void chv_prepare_pll(struct intel_crtc *crtc,
  5545. const struct intel_crtc_state *pipe_config)
  5546. {
  5547. struct drm_device *dev = crtc->base.dev;
  5548. struct drm_i915_private *dev_priv = to_i915(dev);
  5549. enum pipe pipe = crtc->pipe;
  5550. enum dpio_channel port = vlv_pipe_to_channel(pipe);
  5551. u32 loopfilter, tribuf_calcntr;
  5552. u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
  5553. u32 dpio_val;
  5554. int vco;
  5555. /* Enable Refclk and SSC */
  5556. I915_WRITE(DPLL(pipe),
  5557. pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
  5558. /* No need to actually set up the DPLL with DSI */
  5559. if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
  5560. return;
  5561. bestn = pipe_config->dpll.n;
  5562. bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
  5563. bestm1 = pipe_config->dpll.m1;
  5564. bestm2 = pipe_config->dpll.m2 >> 22;
  5565. bestp1 = pipe_config->dpll.p1;
  5566. bestp2 = pipe_config->dpll.p2;
  5567. vco = pipe_config->dpll.vco;
  5568. dpio_val = 0;
  5569. loopfilter = 0;
  5570. mutex_lock(&dev_priv->sb_lock);
  5571. /* p1 and p2 divider */
  5572. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
  5573. 5 << DPIO_CHV_S1_DIV_SHIFT |
  5574. bestp1 << DPIO_CHV_P1_DIV_SHIFT |
  5575. bestp2 << DPIO_CHV_P2_DIV_SHIFT |
  5576. 1 << DPIO_CHV_K_DIV_SHIFT);
  5577. /* Feedback post-divider - m2 */
  5578. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
  5579. /* Feedback refclk divider - n and m1 */
  5580. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
  5581. DPIO_CHV_M1_DIV_BY_2 |
  5582. 1 << DPIO_CHV_N_DIV_SHIFT);
  5583. /* M2 fraction division */
  5584. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
  5585. /* M2 fraction division enable */
  5586. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
  5587. dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
  5588. dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
  5589. if (bestm2_frac)
  5590. dpio_val |= DPIO_CHV_FRAC_DIV_EN;
  5591. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
  5592. /* Program digital lock detect threshold */
  5593. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
  5594. dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
  5595. DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
  5596. dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
  5597. if (!bestm2_frac)
  5598. dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
  5599. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
  5600. /* Loop filter */
  5601. if (vco == 5400000) {
  5602. loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
  5603. loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
  5604. loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
  5605. tribuf_calcntr = 0x9;
  5606. } else if (vco <= 6200000) {
  5607. loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
  5608. loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
  5609. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  5610. tribuf_calcntr = 0x9;
  5611. } else if (vco <= 6480000) {
  5612. loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
  5613. loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
  5614. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  5615. tribuf_calcntr = 0x8;
  5616. } else {
  5617. /* Not supported. Apply the same limits as in the max case */
  5618. loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
  5619. loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
  5620. loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
  5621. tribuf_calcntr = 0;
  5622. }
  5623. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
  5624. dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
  5625. dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
  5626. dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
  5627. vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
  5628. /* AFC Recal */
  5629. vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
  5630. vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
  5631. DPIO_AFC_RECAL);
  5632. mutex_unlock(&dev_priv->sb_lock);
  5633. }
  5634. /**
  5635. * vlv_force_pll_on - forcibly enable just the PLL
  5636. * @dev_priv: i915 private structure
  5637. * @pipe: pipe PLL to enable
  5638. * @dpll: PLL configuration
  5639. *
  5640. * Enable the PLL for @pipe using the supplied @dpll config. To be used
  5641. * in cases where we need the PLL enabled even when @pipe is not going to
  5642. * be enabled.
  5643. */
  5644. int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
  5645. const struct dpll *dpll)
  5646. {
  5647. struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
  5648. struct intel_crtc_state *pipe_config;
  5649. pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
  5650. if (!pipe_config)
  5651. return -ENOMEM;
  5652. pipe_config->base.crtc = &crtc->base;
  5653. pipe_config->pixel_multiplier = 1;
  5654. pipe_config->dpll = *dpll;
  5655. if (IS_CHERRYVIEW(dev_priv)) {
  5656. chv_compute_dpll(crtc, pipe_config);
  5657. chv_prepare_pll(crtc, pipe_config);
  5658. chv_enable_pll(crtc, pipe_config);
  5659. } else {
  5660. vlv_compute_dpll(crtc, pipe_config);
  5661. vlv_prepare_pll(crtc, pipe_config);
  5662. vlv_enable_pll(crtc, pipe_config);
  5663. }
  5664. kfree(pipe_config);
  5665. return 0;
  5666. }
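/*
 * Minimal usage sketch (hypothetical divider values, not a call site in
 * this driver): force pipe A's PLL on around work that needs a running
 * clock, then undo it with vlv_force_pll_off() below.
 *
 *	const struct dpll dpll = {
 *		.n = 1, .m1 = 2, .m2 = 27, .p1 = 2, .p2 = 2,
 *	};
 *
 *	if (vlv_force_pll_on(dev_priv, PIPE_A, &dpll) == 0) {
 *		... do the work that requires the PLL ...
 *		vlv_force_pll_off(dev_priv, PIPE_A);
 *	}
 */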
/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used to undo a previous
 * vlv_force_pll_on() when @pipe was never actually enabled.
 */
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (IS_CHERRYVIEW(dev_priv))
		chv_disable_pll(dev_priv, pipe);
	else
		vlv_disable_pll(dev_priv, pipe);
}

static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
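/*
 * The P1 field above is one-hot, not binary: for example p1 = 3 is
 * programmed as (1 << (3 - 1)) = 0x4 before shifting by
 * DPLL_FPA01_P1_POST_DIV_SHIFT, i.e. only bit (p1 - 1) of the field is set.
 */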
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
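/*
 * Unlike the LVDS path above (one-hot encoding), the non-LVDS 8xx path
 * stores P1 as a plain binary field biased by two: p1 = 4, for instance,
 * is written as (4 - 2) = 2, with p1 = 2 expressed via the dedicated
 * PLL_P1_DIVIDE_BY_TWO bit instead.
 */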
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for
	 * otherwise the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
}
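/*
 * Each timing register packs two zero-based values: the active/start count
 * in bits 15:0 and the total/end count in bits 31:16. For an illustrative
 * 1080p CEA mode (hdisplay 1920, htotal 2200), HTOTAL would be written as
 * (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f.
 */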
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}

static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}
}

static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}

void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config)
{
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->base.adjusted_mode.flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
	mode->hsync = drm_mode_hsync(mode);
	mode->vrefresh = drm_mode_vrefresh(mode);
	drm_mode_set_name(mode);
}
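/*
 * drm_mode_vrefresh() derives the refresh rate roughly as
 * clock * 1000 / (htotal * vtotal): for the illustrative 1080p mode above
 * (clock 148500, htotal 2200, vtotal 1125) that is
 * 148500000 / 2475000 = 60 Hz.
 */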
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev_priv)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 48000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i8xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_pineview_lvds;
	} else {
		limit = &intel_limits_pineview_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

static int chv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_chv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	chv_compute_dpll(crtc, crtc_state);

	return 0;
}

static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_vlv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	vlv_compute_dpll(crtc, crtc_state);

	return 0;
}

static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t tmp;

	if (INTEL_GEN(dev_priv) <= 3 &&
	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}

static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
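/*
 * Sanity-check example for the decode above (divider values illustrative):
 * vlv_calc_dpll_params() computes roughly
 * refclk * m1 * m2 / (n * p1 * p2 * 5), so m1 = 2, m2 = 27, n = 1,
 * p1 = 2, p2 = 2 with the 100000 kHz refclk gives a 5.4 GHz VCO and a
 * 270000 kHz port clock.
 */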
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
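/*
 * Note how m2 is reassembled as a fixed-point value with 22 fraction bits:
 * an integer part of 26 from PLL_DW0 plus a half-step fraction of 0x200000
 * from PLL_DW2, for example, decodes to (26 << 22) | 0x200000, i.e.
 * m2 = 26.5 (numbers illustrative; chv_prepare_pll() above splits the
 * value back apart the same way).
 */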
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to set up the display ref clock before DPLL
	 * enabling. This is only under the driver's control after
	 * PCH B stepping; earlier chipset steppings should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	BUG_ON(val != final);
}

static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	uint32_t reg, tmp;

	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
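/*
 * Worked example for the bend math above: steps = -25 maps to
 * BEND_IDX(-25) = (50 - 25) / 5 = 5, i.e. sscdivintphase[5] = 0x0225, and
 * since -25 is not a multiple of 10 the 0xAAAAAAAB dither phase word is
 * used. Per the formula in the comment, the clock period grows by about
 * 2.5 * 5.787 = 14.5 ps, slowing the clock as expected for negative steps.
 */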
  6648. #undef BEND_IDX
  6649. static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
  6650. {
  6651. struct intel_encoder *encoder;
  6652. bool has_vga = false;
  6653. for_each_intel_encoder(&dev_priv->drm, encoder) {
  6654. switch (encoder->type) {
  6655. case INTEL_OUTPUT_ANALOG:
  6656. has_vga = true;
  6657. break;
  6658. default:
  6659. break;
  6660. }
  6661. }
  6662. if (has_vga) {
  6663. lpt_bend_clkout_dp(dev_priv, 0);
  6664. lpt_enable_clkout_dp(dev_priv, true, true);
  6665. } else {
  6666. lpt_disable_clkout_dp(dev_priv);
  6667. }
  6668. }
  6669. /*
  6670. * Initialize reference clocks when the driver loads
  6671. */
  6672. void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
  6673. {
  6674. if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
  6675. ironlake_init_pch_refclk(dev_priv);
  6676. else if (HAS_PCH_LPT(dev_priv))
  6677. lpt_init_pch_refclk(dev_priv);
  6678. }
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));
}

static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
		u32 val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
	}
}
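
/*
 * Minimum number of link lanes needed to carry target_clock at the given
 * bpp. Illustrative (hypothetical) numbers: 148500 kHz at 24 bpp needs
 * 148500 * 24 * 21 / 20 = 3742200 kbps; a 270000 kHz link carries
 * 270000 * 8 = 2160000 kbps per lane, so DIV_ROUND_UP() gives 2 lanes.
 */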
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;

	return DIV_ROUND_UP(bps, link_bw * 8);
}

static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;
	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
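
/*
 * Compute the DPLL dividers for a PCH port: pick the refclk (120 MHz, or
 * the VBT SSC frequency for LVDS panels using spread spectrum), choose the
 * limits table matching the output type, run the g4x divider search and
 * then reserve a shared PCH PLL. CPU eDP bypasses all of this since it
 * does not need a PCH PLL of its own.
 */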
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct dpll reduced_clock;
	bool has_reduced_clock = false;
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ? &reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}
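
/*
 * Read back the link M/N ratio and TU size currently in use. The data M1
 * register also carries the TU size field, so it is read twice: once with
 * the TU bits masked off for gmch_m, and once shifted down (plus one) for
 * tu.
 */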
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/*
		 * Read the M2_N2 registers only on gen < 8 (they only exist
		 * there) and only if DRRS is supported, so the registers are
		 * not read unnecessarily.
		 */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
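
/*
 * SKL+ pipe scalers: walk the scalers of this pipe and pick the one that
 * is enabled and bound to the pipe itself (PS_PLANE_SEL_MASK clear means
 * pipe scaling rather than plane scaling), then record its window
 * position and size.
 */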
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}

static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
		/*
		 * We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes
		 * which differentiate them) so just WARN about this case
		 * for now.
		 */
		if (IS_GEN7(dev_priv)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
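
/*
 * Sanity checks before turning off LCPLL: every pipe, power well, PLL,
 * panel, backlight PWM and interrupt source that depends on the display
 * clock must already be off, otherwise disabling LCPLL can hang the
 * hardware.
 */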
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (IS_HASWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(dev_priv);
}
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
		if (!intel_ddi_pll_select(crtc, crtc_state))
			return -EINVAL;
	}

	crtc->lowfreq_avail = false;

	return 0;
}

static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;

	switch (port) {
	case PORT_A:
		id = DPLL_ID_SKL_DPLL0;
		break;
	case PORT_B:
		id = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_C:
		id = DPLL_ID_SKL_DPLL2;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	id = temp >> (port * 3 + 1);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
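
/*
 * Determine which CPU transcoder feeds this pipe (including the special
 * eDP transcoder, which can be routed to any pipe) and read back whether
 * it is enabled. The transcoder's power domain reference is recorded in
 * *power_domain_mask so the caller can drop it once readout is done.
 */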
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;

		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}

static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}
	/*
	 * Haswell has only one FDI/PCH transcoder, transcoder A, which is
	 * connected to DDI E. So just check whether this pipe is wired to
	 * DDI E and whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_crtc_init_scalers(crtc, pipe_config);

		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev_priv))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
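
/*
 * Build the cursor control register value for 845g/865g. These chips take
 * an explicit cursor stride, which must be a power of two between 256 and
 * 2048 bytes; anything else is clamped to 256 with a one-time warning.
 */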
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	unsigned int width = plane_state->base.crtc_w;
	unsigned int stride = roundup_pow_of_two(width) * 4;

	switch (stride) {
	default:
		WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
			  width, stride);
		stride = 256;
		/* fallthrough */
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	}

	return CURSOR_ENABLE |
		CURSOR_GAMMA_ENABLE |
		CURSOR_FORMAT_ARGB |
		CURSOR_STRIDE(stride);
}

static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		POSTING_READ_FW(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE_FW(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
		POSTING_READ_FW(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}

static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 cntl;

	cntl = MCURSOR_GAMMA_ENABLE;

	if (HAS_DDI(dev_priv))
		cntl |= CURSOR_PIPE_CSC_ENABLE;

	cntl |= pipe << 28; /* Connect to correct pipe */

	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= CURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= CURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= CURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_ROTATE_180)
		cntl |= CURSOR_ROTATE_180;

	return cntl;
}

static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->base.visible)
		cntl = plane_state->ctl;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		POSTING_READ_FW(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE_FW(CURBASE(pipe), base);
	POSTING_READ_FW(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	unsigned long irqflags;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    plane_state->base.rotation & DRM_ROTATE_180) {
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(CURPOS(pipe), pos);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
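
/*
 * Validate a requested cursor size: 845g/865g only constrain the width
 * (in 64-pixel steps, up to the chip's maximum) and allow almost any
 * height, while everything else requires square power-of-two cursors of
 * 64, 128 or 256 pixels (128/256 only on gen3+).
 */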
static bool cursor_size_ok(struct drm_i915_private *dev_priv,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_I845G(dev_priv) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev_priv))
				return false;
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0,
		 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
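
/*
 * Wrap a GEM object in a new framebuffer. On failure the intel_framebuffer
 * allocation is freed and an ERR_PTR is returned; the caller keeps its own
 * reference to the object and must drop it if the framebuffer was not
 * created. A minimal (hypothetical) usage sketch:
 *
 *	struct drm_mode_fb_cmd2 cmd = { .width = w, .height = h, ... };
 *	struct drm_framebuffer *fb = intel_framebuffer_create(obj, &cmd);
 *	if (IS_ERR(fb))
 *		i915_gem_object_put(obj);
 */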
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
			 struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}
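
/*
 * Bytes per scanline, rounded up to the 64-byte alignment the display
 * engine wants. For example (illustrative numbers): a 1920-wide mode at
 * 32 bpp needs 1920 * 32 / 8 = 7680 bytes, already a multiple of 64, so
 * the pitch stays 7680.
 */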
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);

	return ALIGN(pitch, 64);
}

static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);

	return PAGE_ALIGN(pitch * mode->vdisplay);
}

static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_object_create(to_i915(dev),
				     intel_framebuffer_size_for_mode(mode, bpp));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}

static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->format->cpp[0] * 8))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}

static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}

bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
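
/*
 * Work out which reference clock the DPLL was programmed against: the VBT
 * SSC frequency when the DPLL uses spread spectrum, 120 MHz on PCH split
 * platforms, 96 MHz on other non-gen2 parts and 48 MHz on gen2.
 */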
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev_priv))
		return 120000;
	else if (!IS_GEN2(dev_priv))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				      "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
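
/*
 * Derive the pixel clock from the link M/N ratio:
 * dot_clock = (link_m / link_n) * link_freq. As an illustrative
 * (hypothetical) example, link_m = 22222 and link_n = 55555 on a
 * 270000 kHz link give 22222 * 270000 / 55555 = 108000 kHz.
 */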

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and computing the dot clock from the link m/n is simpler:
	 * pixel_clock = (link_m * link_clock) / link_n
	 */
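	/*
	 * Example: a 2.7 GHz DP link (link_freq = 270000) with a
	 * link_m/link_n ratio of 0.55 (say 22/40) yields
	 * 22 * 270000 / 40 = 148500, i.e. a 148.5 MHz dot clock.
	 */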
	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));

	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
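	/* The timing registers hold each value minus one, hence the +1. */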
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}

static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_flip_work *work;

	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->flip_work;
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		cancel_work_sync(&work->mmio_work);
		cancel_work_sync(&work->unpin_work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_vma(work->old_vma);
	i915_gem_object_put(work->pending_flip_obj);
	mutex_unlock(&dev->struct_mutex);

	i915_gem_request_put(work->flip_queued_req);

	intel_frontbuffer_flip_complete(to_i915(dev),
					to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}

/* Is 'a' after or equal to 'b'? */
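/*
 * The unsigned subtraction keeps this correct across 32-bit wraparound:
 * e.g. a = 0x00000001, b = 0xfffffffe gives a - b = 3, so the top bit
 * is clear and 'a' still counts as after 'b'.
 */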
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	return !((a - b) & 0x80000000);
}

static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (abort_flip_on_reset(crtc))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
					crtc->flip_work->flip_count);
}

static bool
__pageflip_finished_mmio(struct intel_crtc *crtc,
			 struct intel_flip_work *work)
{
	/*
	 * MMIO work completes when vblank is different from
	 * flip_queued_vblank.
	 *
	 * Reset counter value doesn't matter, this is handled by
	 * i915_wait_request finishing early, so no need to handle
	 * reset here.
	 */
	return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
}

static bool pageflip_finished(struct intel_crtc *crtc,
			      struct intel_flip_work *work)
{
	if (!atomic_read(&work->pending))
		return false;

	smp_rmb();

	if (is_mmio_work(work))
		return __pageflip_finished_mmio(crtc, work);
	else
		return __pageflip_finished_cs(crtc, work);
}

void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = crtc->flip_work;

	if (work != NULL &&
	    !is_mmio_work(work) &&
	    pageflip_finished(crtc, work))
		page_flip_completed(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = crtc->flip_work;

	if (work != NULL &&
	    is_mmio_work(work) &&
	    pageflip_finished(crtc, work))
		page_flip_completed(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ... */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}

static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask, *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
	*cs++ = MI_NOOP;
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = 0; /* aux display base address, unused */

	return 0;
}

static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask, *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
	*cs++ = MI_NOOP;
	*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = MI_NOOP;

	return 0;
}

static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 pf, pipesrc, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset |
		intel_fb_modifier_to_tiling(fb->modifier);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	*cs++ = pf | pipesrc;

	return 0;
}

static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 pf, pipesrc, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
	*cs++ = intel_crtc->flip_work->gtt_offset;

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	*cs++ = pf | pipesrc;

	return 0;
}

static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 *cs, plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (req->engine->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev_priv))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (req->engine->id == RCS) {
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(DERRMR);
		*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
			  DERRMR_PIPEB_PRI_FLIP_DONE |
			  DERRMR_PIPEC_PRI_FLIP_DONE);
		if (IS_GEN8(dev_priv))
			*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
				MI_SRM_LRM_GLOBAL_GTT;
		else
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
		*cs++ = i915_mmio_reg_offset(DERRMR);
		*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
		if (IS_GEN8(dev_priv)) {
			*cs++ = 0;
			*cs++ = MI_NOOP;
		}
	}

	*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = MI_NOOP;

	return 0;
}

static bool use_mmio_flip(struct intel_engine_cs *engine,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */
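	/*
	 * i915.use_mmio_flip: -1 forces CS flips, 1 forces MMIO flips,
	 * 0 (auto) picks MMIO when execlists are enabled or when the
	 * object was last written by a different engine.
	 */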
	if (engine == NULL)
		return true;

	if (INTEL_GEN(engine->i915) < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;

	return engine != i915_gem_object_last_write_engine(obj);
}

static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride = skl_plane_stride(fb, 0, rotation);

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

static void intel_mmio_flip_work_func(struct work_struct *w)
{
	struct intel_flip_work *work =
		container_of(w, struct intel_flip_work, mmio_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);

	intel_pipe_update_start(crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_do_mmio_flip(crtc, work->rotation, work);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc, work);
}

static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}
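
/*
 * Heuristic for a stuck CS flip: if at least three vblanks have passed
 * since the flip became ready and the scanout base address already
 * points at the new framebuffer, assume the flip-done interrupt was
 * missed.
 */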
static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
				      struct intel_crtc *intel_crtc,
				      struct intel_flip_work *work)
{
	u32 addr, vblank;

	if (!atomic_read(&work->pending))
		return false;

	smp_rmb();

	vblank = intel_crtc_get_vblank_counter(intel_crtc);
	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req))
			return false;

		work->flip_ready_vblank = vblank;
	}

	if (vblank - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;

	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = crtc->flip_work;

	if (work != NULL && !is_mmio_work(work) &&
	    __pageflip_stall_check_cs(dev_priv, crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			  work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
		page_flip_completed(crtc);
		work = NULL;
	}

	if (work != NULL && !is_mmio_work(work) &&
	    intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);

	spin_unlock(&dev->event_lock);
}

__maybe_unused
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_flip_work *work;
	struct intel_engine_cs *engine;
	bool mmio_flip;
	struct drm_i915_gem_request *request;
	struct i915_vma *vma;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe. In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->format != crtc->primary->fb->format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these registers.
	 */
	if (INTEL_GEN(dev_priv) > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->flip_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->flip_work = work;
	spin_unlock_irq(&dev->event_lock);

	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = i915_gem_object_get(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
	if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
		ret = -EIO;
		goto unlock;
	}

	atomic_inc(&intel_crtc->unpin_work_count);

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		engine = dev_priv->engine[BCS];
		if (fb->modifier != old_fb->modifier)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			engine = NULL;
	} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		engine = dev_priv->engine[BCS];
	} else if (INTEL_GEN(dev_priv) >= 7) {
		engine = i915_gem_object_last_write_engine(obj);
		if (engine == NULL || engine->id != RCS)
			engine = dev_priv->engine[BCS];
	} else {
		engine = dev_priv->engine[RCS];
	}

	mmio_flip = use_mmio_flip(engine, obj);

	vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto cleanup_pending;
	}

	work->old_vma = to_intel_plane_state(primary->state)->vma;
	to_intel_plane_state(primary->state)->vma = vma;

	work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
	work->rotation = crtc->primary->state->rotation;

	/*
	 * There's the potential that the next frame will not be compatible with
	 * FBC, so we want to call pre_update() before the actual page flip.
	 * The problem is that pre_update() caches some information about the fb
	 * object, so we want to do this only after the object is pinned. Let's
	 * be on the safe side and do this immediately before scheduling the
	 * flip.
	 */
	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
			     to_intel_plane_state(primary->state));

	if (mmio_flip) {
		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
		queue_work(system_unbound_wq, &work->mmio_work);
	} else {
		request = i915_gem_request_alloc(engine,
						 dev_priv->kernel_context);
		if (IS_ERR(request)) {
			ret = PTR_ERR(request);
			goto cleanup_unpin;
		}

		ret = i915_gem_request_await_object(request, obj, false);
		if (ret)
			goto cleanup_request;

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_request;

		intel_mark_page_flip_active(intel_crtc, work);

		work->flip_queued_req = i915_gem_request_get(request);
		i915_add_request(request);
	}

	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_prepare(to_i915(dev),
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_request:
	i915_add_request(request);
cleanup_unpin:
	to_intel_plane_state(primary->state)->vma = work->old_vma;
	intel_unpin_fb_vma(vma);
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
unlock:
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	i915_gem_object_put(obj);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		drm_atomic_state_put(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}

/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
 * @state: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true if the watermarks need to be recalculated, false otherwise.
 */
static bool intel_wm_need_update(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct intel_plane_state *new = to_intel_plane_state(state);
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);

	/* Update watermarks on tiling or size changes. */
	if (new->base.visible != cur->base.visible)
		return true;

	if (!cur->base.fb || !new->base.fb)
		return false;

	if (cur->base.fb->modifier != new->base.fb->modifier ||
	    cur->base.rotation != new->base.rotation ||
	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
		return true;

	return false;
}
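
/*
 * Plane src rectangles are in 16.16 fixed point while dst rectangles
 * are in integer pixels, hence the >> 16 below; any difference between
 * the two sizes implies scaling.
 */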
static bool needs_scaling(struct intel_plane_state *state)
{
	int src_w = drm_rect_width(&state->base.src) >> 16;
	int src_h = drm_rect_height(&state->base.src) >> 16;
	int dst_w = drm_rect_width(&state->base.dst);
	int dst_h = drm_rect_height(&state->base.dst);

	return (src_w != dst_w || src_h != dst_h);
}

int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5)
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5)
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(&plane->base, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
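
/*
 * Cloneable masks are per encoder type: e.g. if a->cloneable has the
 * bit for b->type set but b->cloneable lacks the bit for a->type, the
 * pair is not cloneable, hence the check in both directions below.
 */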
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state. We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	return ret;
}

static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};

static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
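
/*
 * Example: a panel whose EDID reports 6 bpc clamps a requested
 * pipe_bpp of 24 (8 bpc * 3 channels) down to 18.
 */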
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	const struct drm_display_info *info = &connector->base.display_info;
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (info->bpc != 0 && info->bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, info->bpc * 3);
		pipe_config->pipe_bpp = info->bpc * 3;
	}

	/* Clamp bpp to 8 bpc (24 bpp) on screens without EDID 1.4 */
	if (info->bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}

static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}

static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
		      unsigned int lane_count, struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}

static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				      pipe_config->lane_count, &pipe_config->dp_m_n);

		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}

static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << enc_to_mst(&encoder->base)->primary->port;
			break;
		default:
			break;
		}
	}

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return true;
}

static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}

static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive nor negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		pipe_config->output_types |= 1 << encoder->type;
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems not to pass bits through correctly when it should, so
	 * only enable it on 6bpc panels and when it's not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}

static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/* Double check state. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

		/* Update hwmode for vblank functions */
		if (new_crtc_state->active)
			crtc->hwmode = new_crtc_state->adjusted_mode;
		else
			crtc->hwmode.crtc_clock = 0;

		/*
		 * Update legacy state to satisfy fbc code. This can
		 * be removed when fbc uses the atomic state.
		 */
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
			struct drm_plane_state *plane_state = crtc->primary->state;

			crtc->primary->fb = plane_state->fb;
			crtc->x = plane_state->src_x >> 16;
			crtc->y = plane_state->src_y >> 16;
		}
	}
}
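
/*
 * Two clocks are considered equal when they differ by less than ~5%
 * of their sum: e.g. 100000 vs 104000 kHz gives
 * (4000 + 204000) * 100 / 204000 = 101 < 105, so the pair matches.
 */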
  9607. static bool intel_fuzzy_clock_check(int clock1, int clock2)
  9608. {
  9609. int diff;
  9610. if (clock1 == clock2)
  9611. return true;
  9612. if (!clock1 || !clock2)
  9613. return false;
  9614. diff = abs(clock1 - clock2);
  9615. if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
  9616. return true;
  9617. return false;
  9618. }
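/*
 * For illustration only (hypothetical inputs, not part of the driver):
 * with clock1 = 100000 and clock2 = 104000, diff = 4000 and the check
 * computes ((4000 + 204000) * 100) / 204000 = 101, which is < 105, so
 * the clocks compare as equal. The tolerance is therefore roughly
 * diff < 5% of (clock1 + clock2), i.e. about 10% of the average clock.
 */
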
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}

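/*
 * Worked example (illustrative values only): comparing m/n = 63/100
 * against m2/n2 = 32/50, the second ratio is doubled once to 64/100 so
 * both denominators match, and intel_fuzzy_clock_check(63, 64) then
 * accepts the pair. Ratios whose denominators do not differ by a power
 * of two (e.g. 1/100 vs 1/300) fail the n != n2 test above and compare
 * as unequal.
 */
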
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       struct intel_link_m_n *m2_n2,
		       bool adjust)
{
	if (m_n->tu == m2_n2->tu &&
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
		if (adjust)
			*m2_n2 = *m_n;

		return true;
	}

	return false;
}

static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	char *level;
	unsigned int category;
	struct va_format vaf;
	va_list args;

	if (adjust) {
		level = KERN_DEBUG;
		category = DRM_UT_KMS;
	} else {
		level = KERN_ERR;
		category = DRM_UT_NONE;
	}

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	drm_printk(level, category, "mismatch in %s %pV", name, &vaf);

	va_end(args);
}

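/*
 * Usage sketch (hypothetical values, for illustration only): a caller
 * reporting a pixel_multiplier mismatch would do
 *
 *	pipe_config_err(adjust, "pixel_multiplier",
 *			"(expected %i, found %i)\n", 1, 2);
 *
 * which logs at KERN_DEBUG while fastset adjustment is still possible
 * (adjust == true) and escalates to KERN_ERR once the state is final.
 */
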
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;

#define PIPE_CONF_CHECK_X(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected 0x%08x, found 0x%08x)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_I(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %i, found %i)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_P(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %p, found %p)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_M_N(name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, \
				    adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected tu %i gmch %i/%i link %i/%i, " \
				"found tu %i, gmch %i/%i link %i/%i)\n", \
				current_config->name.tu, \
				current_config->name.gmch_m, \
				current_config->name.gmch_n, \
				current_config->name.link_m, \
				current_config->name.link_n, \
				pipe_config->name.tu, \
				pipe_config->name.gmch_m, \
				pipe_config->name.gmch_n, \
				pipe_config->name.link_m, \
				pipe_config->name.link_n); \
		ret = false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected tu %i gmch %i/%i link %i/%i, " \
				"or tu %i gmch %i/%i link %i/%i, " \
				"found tu %i, gmch %i/%i link %i/%i)\n", \
				current_config->name.tu, \
				current_config->name.gmch_m, \
				current_config->name.gmch_n, \
				current_config->name.link_m, \
				current_config->name.link_n, \
				current_config->alt_name.tu, \
				current_config->alt_name.gmch_m, \
				current_config->alt_name.gmch_n, \
				current_config->alt_name.link_m, \
				current_config->alt_name.link_n, \
				pipe_config->name.tu, \
				pipe_config->name.gmch_m, \
				pipe_config->name.gmch_n, \
				pipe_config->name.link_m, \
				pipe_config->name.link_n); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_FLAGS(name, mask) \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(%x) (expected %i, found %i)\n", \
				(mask), \
				current_config->name & (mask), \
				pipe_config->name & (mask)); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %i, found %i)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_I(limited_color_range);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_I(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev_priv))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_M_N
#undef PIPE_CONF_CHECK_M_N_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return ret;
}

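/*
 * For reference (illustrative expansion, not extra code): a check such as
 * PIPE_CONF_CHECK_I(lane_count) above expands to roughly
 *
 *	if (current_config->lane_count != pipe_config->lane_count) {
 *		pipe_config_err(adjust, "lane_count",
 *				"(expected %i, found %i)\n",
 *				current_config->lane_count,
 *				pipe_config->lane_count);
 *		ret = false;
 *	}
 *
 * so every mismatch is reported individually and the function still
 * walks the remaining fields before returning the accumulated result.
 */
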
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
		     fdi_dotclock, dotclock);
	}
}

static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
		sw_ddb_entry = &sw_ddb->plane[pipe][plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check.
	 */
	if (intel_crtc->cursor_addr) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}

static void
verify_connector_state(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;

		if (new_conn_state->crtc != crtc)
			continue;

		intel_connector_verify_state(to_intel_connector(connector));

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
				"connector's atomic encoder doesn't match legacy encoder\n");
	}
}

static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
					"connector's crtc doesn't match encoder crtc\n");
		}

		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
				"encoder's enabled state mismatch "
				"(expected %i, found %i)\n",
				!!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
					"encoder detached but still enabled on pipe %c.\n",
					pipe_name(pipe));
		}
	}
}

static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* hw state is inconsistent with the pipe quirk */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active) {
			pipe_config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, pipe_config);
		}
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(crtc->state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}

static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}

static void
verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
			 struct drm_crtc_state *old_crtc_state,
			 struct drm_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);

	if (new_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);

	if (old_state->shared_dpll &&
	    old_state->shared_dpll != new_state->shared_dpll) {
		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
		struct intel_shared_dpll *pll = old_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask)\n",
				pipe_name(drm_crtc_index(crtc)));
		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
				pipe_name(drm_crtc_index(crtc)));
	}
}

static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
			  struct drm_atomic_state *state,
			  struct drm_crtc_state *old_state,
			  struct drm_crtc_state *new_state)
{
	if (!needs_modeset(new_state) &&
	    !to_intel_crtc_state(new_state)->update_pipe)
		return;

	verify_wm_state(crtc, new_state);
	verify_connector_state(crtc->dev, state, crtc);
	verify_crtc_state(crtc, old_state, new_state);
	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}

static void
verify_disabled_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
}

static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}

static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev_priv)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

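/*
 * Worked example (hypothetical timings, for illustration only): with
 * crtc_vtotal = 525 on gen2, scanline_offset becomes 524; adding 524
 * modulo vtotal is the same as subtracting the one line by which the
 * gen2 counter runs ahead. On most platforms the offset is 1, and on
 * HSW+ HDMI outputs it is 2 to absorb the extra line of difference
 * described above.
 */
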
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(old_crtc_state)->shared_dpll;

		if (!needs_modeset(new_crtc_state))
			continue;

		to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		intel_release_shared_dpll(old_dpll, intel_crtc, state);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

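/*
 * Example scenario (hypothetical): a commit that enables pipe B while
 * pipe A stays active and untouched finds first_crtc_state == pipe B
 * and enabled_pipe == PIPE_A, so pipe B gets hsw_workaround_pipe =
 * PIPE_A and will wait for vblanks on A before enabling its planes.
 * If A and B are both freshly enabled in the same commit, B
 * (other_crtc_state) instead waits on A (first_pipe).
 */
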
static int intel_lock_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;

	/* Add all pipes to the state */
	for_each_crtc(state->dev, crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	return 0;
}

static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
	 */
	for_each_crtc(state->dev, crtc) {
		struct drm_crtc_state *crtc_state;
		int ret;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->active || needs_modeset(crtc_state))
			continue;

		crtc_state->mode_changed = true;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected
		 * by holding all the crtc locks, even if we don't end
		 * up touching the hardware.
		 */
		if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
					       &intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
					       &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
	} else {
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}

/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase. The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
static int calc_watermark_data(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Is there platform-specific watermark information to calculate? */
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;

		if (!needs_modeset(crtc_state))
			continue;

		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		if (i915.fastboot &&
		    intel_pipe_config_compare(dev_priv,
					      to_intel_crtc_state(old_crtc_state),
					      pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			return ret;
	} else {
		intel_state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, state);
	return calc_watermark_data(state);
}

static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	if (!dev->max_vblank_count)
		return drm_accurate_vblank_count(&crtc->base);

	return dev->driver->get_vblank_counter(dev, crtc->pipe);
}

static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	for_each_pipe(dev_priv, pipe) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  pipe);

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(&crtc->base);
		if (WARN_ON(ret != 0)) {
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
	}

	for_each_pipe(dev_priv, pipe) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  pipe);
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
					  last_vblank_count[pipe] !=
						drm_crtc_vblank_count(&crtc->base),
					  msecs_to_jiffies(50));

		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));

		drm_crtc_vblank_put(&crtc->base);
	}
}

static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
{
	/* fb updated, need to unpin old fb */
	if (crtc_state->fb_changed)
		return true;

	/* wm changes, need vblank before final wm's */
	if (crtc_state->update_wm_post)
		return true;

	if (crtc_state->wm.need_postvbl_update)
		return true;

	return false;
}

static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state,
			      unsigned int *crtc_vblank_mask)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);

	if (modeset) {
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(pipe_config, state);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);
	}

	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
		intel_fbc_enable(
		    intel_crtc, pipe_config,
		    to_intel_plane_state(crtc->primary->state));
	}

	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

	if (needs_vblank_wait(pipe_config))
		*crtc_vblank_mask |= drm_crtc_mask(crtc);
}

static void intel_update_crtcs(struct drm_atomic_state *state,
			       unsigned int *crtc_vblank_mask)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		intel_update_crtc(crtc, state, old_crtc_state,
				  new_crtc_state, crtc_vblank_mask);
	}
}

static void skl_update_crtcs(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;
	bool progress;
	enum pipe pipe;
	int i;

	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(crtc->state);
			pipe = intel_crtc->pipe;

			if (updated & cmask || !cstate->base.active)
				continue;

			if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
				continue;

			updated |= cmask;
			entries[i] = &cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating,
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state, crtc_vblank_mask);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);
}

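/*
 * Illustration (made-up DDB ranges): if pipe A currently owns blocks
 * 0-511 and pipe B 512-895, and the new state gives A 0-447 and B
 * 448-895, then B's new allocation overlaps A's current one. The loop
 * above therefore updates A first (shrinking it to 0-447), waits a
 * vblank for that to take effect, and only then lets B grow into
 * 448-895, so the two pipes never fetch from overlapping DDB ranges.
 */
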
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}

static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	bool hw_check = intel_state->modeset;
	u64 put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;
	int i;

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
			hw_check = true;

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(new_crtc_state));
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       to_intel_crtc_state(new_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active) {
				/*
				 * Make sure we don't call initial_watermarks
				 * for ILK-style watermark updates.
				 *
				 * No clue what this is supposed to achieve.
				 */
				if (INTEL_GEN(dev_priv) >= 9)
					dev_priv->display.initial_watermarks(intel_state,
									     to_intel_crtc_state(crtc->state));
			}
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state, &crtc_vblank_mask);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      intel_cstate);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	intel_atomic_helper_free_state(dev_priv);
}

static void intel_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	intel_atomic_commit_tail(state);
}

static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		if (state->base.commit_work.func)
			queue_work(system_unbound_wq, &state->base.commit_work);
		break;

	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}

static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
				  intel_fb_obj(new_plane_state->fb),
				  to_intel_plane(plane)->frontbuffer_bit);
}

/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		state->legacy_cursor_update = false;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	drm_atomic_helper_swap_state(state, true);
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (intel_state->modeset) {
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->cdclk.logical = intel_state->cdclk.logical;
		dev_priv->cdclk.actual = intel_state->cdclk.actual;
	}

	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work,
		  nonblock ? intel_atomic_commit_work : NULL);

	i915_sw_fence_commit(&intel_state->commit_ready);
	if (!nonblock) {
		i915_sw_fence_wait(&intel_state->commit_ready);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

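/*
 * Flow sketch (informal summary of the code above): a blocking commit
 * boils down to
 *
 *	check -> prepare -> swap_state -> wait on commit_ready fence
 *	      -> intel_atomic_commit_tail()
 *
 * while a nonblocking commit instead queues intel_atomic_commit_work()
 * on system_unbound_wq; the commit_ready fence kicks it off once all
 * awaited fences (see intel_prepare_plane_fb()) have signalled.
 */
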
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

out:
	drm_atomic_state_put(state);
}

/*
 * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
 * drm_atomic_helper_legacy_gamma_set() directly.
 */
static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
					 u16 *red, u16 *green, u16 *blue,
					 uint32_t size)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_crtc_state *state;
	int ret;

	ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
	if (ret)
		return ret;

	/*
	 * Make sure we update the legacy properties so this works when
	 * atomic is not enabled.
	 */

	state = crtc->state;

	drm_object_property_set_value(&crtc->base,
				      config->degamma_lut_property,
				      (state->degamma_lut) ?
				      state->degamma_lut->base.id : 0);

	drm_object_property_set_value(&crtc->base,
				      config->ctm_property,
				      (state->ctm) ?
				      state->ctm->base.id : 0);

	drm_object_property_set_value(&crtc->base,
				      config->gamma_lut_property,
				      (state->gamma_lut) ?
				      state->gamma_lut->base.id : 0);

	return 0;
}

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_atomic_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
};


/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (obj) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR &&
		    INTEL_INFO(dev_priv)->cursor_needs_physical) {
			const int align = IS_I830(dev_priv) ? 16 * 1024 : 256;

			ret = i915_gem_object_attach_phys(obj, align);
			if (ret) {
				DRM_DEBUG_KMS("failed to attach phys object\n");
				return ret;
			}
		} else {
			struct i915_vma *vma;

			vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
			if (IS_ERR(vma)) {
				DRM_DEBUG_KMS("failed to pin object\n");
				return PTR_ERR(vma);
			}

			to_intel_plane_state(new_state)->vma = vma;
		}
	}

	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state,
							   plane->state->crtc);
		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	if (!new_state->fence) { /* implicit fencing */
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;
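
		/*
		 * Bump the priority of any in-flight rendering on the
		 * object so the pending flip is not kept waiting.
		 */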
		i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
	}

	return 0;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct i915_vma *vma;

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
	if (vma)
		intel_unpin_fb_vma(vma);
}

int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv;
	int max_scale;
	int crtc_clock, max_dotclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	dev_priv = to_i915(intel_crtc->base.dev);

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;

	if (IS_GEMINILAKE(dev_priv))
		max_dotclk *= 2;

	if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 * close to 3 but not 3, -1 is for that purpose
	 * or
	 * cdclk/crtc_clock
	 */
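	/*
	 * Worked example (illustrative numbers): with max_dotclk = 540000
	 * and crtc_clock = 148500, (1 << 8) * ((540000 << 8) / 148500)
	 * = 256 * 930 = 238080, i.e. ~3.63 in 16.16 fixed point, so the
	 * result clamps to (1 << 16) * 3 - 1, just under a 3x ratio.
	 */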
	max_scale = min((1 << 16) * 3 - 1,
			(1 << 8) * ((max_dotclk << 8) / crtc_clock));

	return max_scale;
}

static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_crtc *crtc = state->base.crtc;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   min_scale, max_scale,
					   can_position, true);
	if (ret)
		return ret;

	if (!state->base.fb)
		return 0;

	if (INTEL_GEN(dev_priv) >= 9) {
		ret = skl_check_plane_surface(state);
		if (ret)
			return ret;

		state->ctl = skl_plane_ctl(crtc_state, state);
	} else {
		state->ctl = i9xx_plane_ctl(crtc_state, state);
	}

	return 0;
}

static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *intel_cstate =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	bool modeset = needs_modeset(crtc->state);

	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}

static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc, NULL);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};

static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct i915_vma *old_vma;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->active || needs_modeset(crtc_state) ||
	    to_intel_crtc_state(crtc_state)->update_pipe)
		goto slow;

	old_plane_state = plane->state;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
		int align = IS_I830(dev_priv) ? 16 * 1024 : 256;

		ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
		if (ret) {
			DRM_DEBUG_KMS("failed to attach phys object\n");
			goto out_unlock;
		}
	} else {
		struct i915_vma *vma;

		vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation);
		if (IS_ERR(vma)) {
			DRM_DEBUG_KMS("failed to pin object\n");

			ret = PTR_ERR(vma);
			goto out_unlock;
		}

		to_intel_plane_state(new_plane_state)->vma = vma;
	}

	old_fb = old_plane_state->fb;
	old_vma = to_intel_plane_state(old_plane_state)->vma;

	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	new_plane_state->fence = old_plane_state->fence;
	*to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
	new_plane_state->fence = NULL;
	new_plane_state->fb = old_fb;
	to_intel_plane_state(new_plane_state)->vma = old_vma;
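
	/*
	 * plane->state now carries the new fb/vma, while new_plane_state
	 * has inherited the old ones, so the cleanup below unpins the
	 * previous framebuffer rather than the one just made current.
	 */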
	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(plane,
					  to_intel_crtc_state(crtc->state),
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(plane, crtc);
	}

	intel_cleanup_plane_fb(plane, new_plane_state);

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	intel_plane_destroy_state(plane, new_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h);
}

static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};

static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->plane = (enum plane) !pipe;
	else
		primary->plane = (enum plane) pipe;
	primary->id = PLANE_PRIMARY;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_90 |
			DRM_ROTATE_180 | DRM_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_180 |
			DRM_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_ROTATE_0 | DRM_ROTATE_180;
	} else {
		supported_rotations = DRM_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}

static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   DRM_PLANE_HELPER_NO_SCALING,
					   DRM_PLANE_HELPER_NO_SCALING,
					   true, true);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(dev_priv, state->base.crtc_w,
			    state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}
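
	/* 4 bytes per pixel (ARGB8888), pitch rounded up to a power of two. */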
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    state->base.visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		state->ctl = i845_cursor_ctl(crtc_state, state);
	else
		state->ctl = i9xx_cursor_ctl(crtc_state, state);

	return 0;
}

static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->cursor_addr = 0;
	intel_crtc_update_cursor(crtc, NULL);
}

static void
intel_update_cursor_plane(struct drm_plane *plane,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
		addr = intel_plane_ggtt_offset(state);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc_update_cursor(crtc, state);
}

static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_ROTATE_0,
						   DRM_ROTATE_0 |
						   DRM_ROTATE_180);

	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}

static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int i;

	crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
	if (!crtc->num_scalers)
		return;

	for (i = 0; i < crtc->num_scalers; i++) {
		struct intel_scaler *scaler = &scaler_state->scalers[i];

		scaler->in_use = 0;
		scaler->mode = PS_SCALER_MODE_DYN;
	}

	scaler_state->scaler_id = -1;
}

static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;
	intel_crtc->plane = primary->plane;

	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}

enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!connector->base.state->crtc)
		return INVALID_PIPE;

	return to_intel_crtc(connector->base.state->crtc)->pipe;
}

int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}
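
/*
 * Build a bitmask of the encoders that can be cloned with @encoder; bit
 * positions follow the encoder enumeration order on the device.
 */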
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

static bool has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (IS_CHERRYVIEW(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = I915_READ(PP_CONTROL(pps_idx));

		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		I915_WRITE(PP_CONTROL(pps_idx), val);
	}
}

static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}

static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev_priv);

	if (intel_crt_present(dev_priv))
		intel_crt_init(dev_priv);

	if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		intel_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);

		if (has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev_priv);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev_priv);

	if (SUPPORTS_TV(dev_priv))
		intel_tv_init(dev_priv);

	intel_psr_init(dev_priv);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}

static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);

	i915_gem_object_lock(intel_fb->obj);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	i915_gem_object_unlock(intel_fb->obj);

	i915_gem_object_put(intel_fb->obj);

	kfree(intel_fb);
}

static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
			 uint64_t fb_modifier, uint32_t pixel_format)
{
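	/*
	 * Summary of the limits below (bytes): gen9+ min(8K pixels, 32K);
	 * gen5-8 without GMCH display 32K; gen4 16K X-tiled / 32K linear;
	 * gen3 8K X-tiled / 16K linear; gen2 8K.
	 */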
	u32 gen = INTEL_GEN(dev_priv);

	if (gen >= 9) {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		/* "The stride in bytes must not exceed the size of 8K
		 *  pixels and 32K bytes."
		 */
		return min(8192 * cpp, 32768);
	} else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}

static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_format_name_buf format_name;
	u32 pitch_limit, stride_alignment;
	unsigned int tiling, stride;
	int ret = -EINVAL;

	i915_gem_object_lock(obj);
	obj->framebuffer_references++;
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
				      mode_cmd->modifier[0]);
			goto err;
		}
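		/* fall through - Y/Yf tiled are accepted on gen9+ */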
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_INFO(dev_priv)->gen < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], pitch_limit);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_GEN(dev_priv) > 3) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_GEN(dev_priv) < 4) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_GEN(dev_priv) < 5) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	default:
		DRM_DEBUG_KMS("unsupported pixel format: %s\n",
			      drm_get_format_name(mode_cmd->pixel_format, &format_name));
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm,
				       &intel_fb->base, mode_cmd);

	stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n",
			      mode_cmd->pitches[0], stride_alignment);
		goto err;
	}

	intel_fb->obj = obj;

	ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(obj->base.dev,
				   &intel_fb->base,
				   &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	i915_gem_object_lock(obj);
	obj->framebuffer_references--;
	i915_gem_object_unlock(obj);
	return ret;
}

static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}

static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
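	/* Branches are ordered newest platform first; the first match wins. */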
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (dev_priv->info.gen >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;
	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;
	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;
	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Fall through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}
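/*
 * A sketch of how a new PCI quirk would be wired up (illustrative only, not
 * an entry shipped in this file): the device id must match exactly, while
 * the subsystem ids may be wildcarded with PCI_ANY_ID, e.g.
 *
 *	{ 0x0046, PCI_ANY_ID, PCI_ANY_ID, quirk_ssc_force_disable },
 *
 * would disable LVDS SSC on every 0x0046 part regardless of subsystem
 * vendor/device, instead of only on the Lenovo and Sony variants listed in
 * intel_quirks[] above.
 */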
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1 << 5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
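/*
 * The sequence above uses the legacy indexed VGA interface: sequencer
 * register SR01 is selected through the index port and its "screen off"
 * bit (bit 5) is set through the data port, with the VGA arbiter held so
 * no other owner touches the legacy I/O range meanwhile. Only after the
 * screen is off and has had time to settle is the VGA display plane
 * itself disabled via the VGACNTRL register.
 */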
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;

	intel_init_clock_gating(dev_priv);
}
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WMs should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
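/*
 * In short, sanitize_watermarks() performs a dry run of an atomic commit:
 * duplicate the readout state, let intel_atomic_check() recompute watermarks
 * for it, then push only the watermark results to the hardware through
 * ->optimize_watermarks() without touching the rest of the pipe
 * configuration.
 */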
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev);

	intel_init_pm(dev_priv);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	if (IS_GEN2(dev_priv)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev_priv)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev_priv)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		int ret;

		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_shared_dpll_init(dev);

	intel_update_czclk(dev_priv);
	intel_modeset_init_hw(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fbs,
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		sanitize_watermarks(dev);

	return 0;
}
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val;

	if (INTEL_INFO(dev_priv)->num_pipes == 1)
		return true;

	val = I915_READ(DSPCNTR(!crtc->plane));

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}
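/*
 * On gen2/3 the plane -> pipe routing is programmable: each display plane's
 * DSPCNTR register carries a pipe select field. The check above therefore
 * reads the *other* plane's DSPCNTR and reports a bad mapping if that plane
 * is both enabled and selected onto this crtc's pipe. With only one pipe
 * there is nothing to cross-check, hence the early return.
 */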
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum transcoder pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}
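/*
 * IBX and CPT PCHs have one PCH transcoder per pipe, so any pch_transcoder
 * value refers to real hardware there. LPT-H is the odd one out with a
 * single PCH transcoder, which is why it only reports true for
 * TRANSCODER_A.
 */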
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			trace_intel_disable_plane(&plane->base, crtc);
			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		crtc->base.primary->state->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this; the update_dpms
		 * call below will restore the pipe to the right state, but
		 * leave the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_transcoder(dev_priv, (enum transcoder)crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
{
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev_priv);
	}
}

void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
static bool primary_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
	bool visible;

	visible = crtc->active && primary_get_hw_state(primary);

	intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
				to_intel_plane_state(primary->base.state),
				visible);
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->state.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			crtc_state->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int pixclk = 0;

		crtc->base.hwmode = crtc_state->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) ||
			    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				pixclk = crtc_state->pixel_rate;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);

			/*
			 * Pixel rate mustn't exceed 95% of cdclk with IPS on
			 * BDW; scaling by 100/95 here makes the stored minimum
			 * cdclk requirement equal to pixel_rate / 0.95.
			 */
			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
				pixclk = DIV_ROUND_UP(pixclk * 100, 95);

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		u64 get_domains;
		enum intel_display_power_domain domain;

		if (!encoder->get_power_domains)
			continue;

		get_domains = encoder->get_power_domains(encoder);
		for_each_power_domain(domain, get_domains)
			intel_display_power_get(dev_priv, domain);
	}
}
/* Scan out the current hw modeset state,
 * and sanitize it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (IS_GEN9(dev_priv)) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_power_domains_verify_state(dev_priv);

	intel_fbc_init_pipe_state(dev_priv);
}
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	/*
	 * This is a kludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_init_gt_powersave(dev_priv);

	intel_setup_overlay(dev_priv);
}

int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = intel_backlight_device_register(intel_connector);
	if (ret)
		goto err;

	return 0;

err:
	return ret;
}

void intel_connector_unregister(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_backlight_device_unregister(intel_connector);
	intel_panel_destroy_backlight(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Interrupts and polling need to be shut down first, to avoid creating
	 * havoc. Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev_priv);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
	unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}
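/*
 * A sketch of how a caller might use the helper above (illustrative only,
 * not an actual call site in this file): to turn off legacy VGA decode on
 * the GMCH, e.g. when handing VGA routing over to another GPU,
 *
 *	ret = intel_modeset_vga_set_state(dev_priv, false);
 *
 * The early "already in the requested state" check makes the call
 * idempotent, so callers don't need to read the current decode state first.
 */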
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif