
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
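/*
 * For reference, a rough sketch (illustrative only, not generated output)
 * of what GEN8_IRQ_RESET_NDX(GT, 0) expands to: mask everything in IMR,
 * disable IER, then clear IIR twice with posting reads in between, since
 * per the comment above IIR can hold a second queued event behind the
 * first one.
 *
 *      I915_WRITE(GEN8_GT_IMR(0), 0xffffffff);
 *      POSTING_READ(GEN8_GT_IMR(0));
 *      I915_WRITE(GEN8_GT_IER(0), 0);
 *      I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);
 *      POSTING_READ(GEN8_GT_IIR(0));
 *      I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);
 *      POSTING_READ(GEN8_GT_IIR(0));
 */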
#define GEN5_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg)
{
        u32 val = I915_READ(reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
        gen5_assert_iir_is_zero(dev_priv, type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)
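/*
 * Editorial note on the two INIT macros above: IIR is asserted to be clear
 * first, IER is programmed before IMR, and the posting read on IMR flushes
 * both writes to the hardware before the macro "returns".
 */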
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     uint32_t mask,
                                     uint32_t bits)
{
        uint32_t val;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid interference between these
 * read-modify-write cycles, the bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   uint32_t mask,
                                   uint32_t bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                            uint32_t interrupt_mask,
                            uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}
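/*
 * The update helpers here and below all follow the same IMR convention:
 * a set bit in IMR masks (disables) the interrupt. So for every bit in
 * @interrupt_mask the new IMR value has the bit cleared when the
 * corresponding @enabled_irq_mask bit is set and set when it is not, i.e.
 * new = (old & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask).
 */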
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
        POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
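/*
 * On gen8+ the PM interrupt registers were folded into the GT interrupt
 * registers at index 2; the three helpers above hide that split so the
 * gen6-style PM code below works on both register layouts.
 */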
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        new_val = dev_priv->pm_imr;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_imr) {
                dev_priv->pm_imr = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_mask_pm_irq(dev_priv, mask);
}

void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        lockdep_assert_held(&dev_priv->irq_lock);

        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
        POSTING_READ(reg);
}
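/*
 * The IIR write in gen6_reset_pm_iir() is issued twice, presumably for the
 * same reason as the double clear in the IRQ_RESET macros above: IIR can
 * theoretically queue up two events.
 */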
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier |= enable_mask;
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        gen6_unmask_pm_irq(dev_priv, enable_mask);
        /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(dev_priv, disable_mask);
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        /* though a barrier is missing here, we don't really need one */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
        dev_priv->rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        if (READ_ONCE(dev_priv->rps.interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON_ONCE(dev_priv->rps.pm_iir);
        WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

        gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        /* Now that we will not be generating any more work, flush any
         * outstanding tasks. As we are called on the RPS idle path,
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
        cancel_work_sync(&dev_priv->rps.work);
        gen6_reset_rps_interrupts(dev_priv);
}
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        if (!dev_priv->guc.interrupts_enabled) {
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
                             dev_priv->pm_guc_events);
                dev_priv->guc.interrupts_enabled = true;
                gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
        }
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->guc.interrupts_enabled = false;

        gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                uint32_t interrupt_mask,
                                uint32_t enabled_irq_mask)
{
        uint32_t new_val;
        uint32_t old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        old_val = I915_READ(GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
                POSTING_READ(GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
                         uint32_t interrupt_mask,
                         uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  uint32_t interrupt_mask,
                                  uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);

        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                       u32 enable_mask, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == enable_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;

        /* Enable the interrupt, clear any pending status */
        pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                        u32 enable_mask, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

        pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
        u32 enable_mask = status_mask << 16;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

        return enable_mask;
}
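/*
 * PIPESTAT packs the interrupt enable bits in the high 16 bits and the
 * corresponding status bits in the low 16, which is why the default
 * enable mask is simply status_mask << 16. The sprite flip-done bits on
 * VLV/CHV don't follow that 1:1 layout, hence the special-casing above.
 */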
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                     u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                      u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_GEN(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                pipe);
        const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
        unsigned long irqflags;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ_FW(low_frame);
                high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
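/*
 * Unlike i915_get_vblank_counter() above, g4x+ hardware exposes a frame
 * counter register directly (PIPE_FRMCOUNT_G4X), so no pixel-counter
 * arithmetic is needed to cook up a vblank count.
 */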
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode = &crtc->base.hwmode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        if (!crtc->active)
                return -1;

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN2(dev_priv))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank. So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem. We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev_priv) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    ktime_t *stime, ktime_t *etime,
                                    const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                pipe);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_IN_VBLANK;

        return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
                                     int *max_error,
                                     struct timeval *vblank_time,
                                     unsigned flags)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;

        if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
                DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }

        if (!crtc->base.hwmode.crtc_clock) {
                DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     &crtc->base.hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev_priv, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);

        return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *rq = NULL;
        struct intel_wait *wait;

        atomic_inc(&engine->irq_count);
        set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

        spin_lock(&engine->breadcrumbs.irq_lock);
        wait = engine->breadcrumbs.irq_wait;
        if (wait) {
                /* We use a callback from the dma-fence to submit
                 * requests after waiting on our own requests. To
                 * ensure minimum delay in queuing the next request to
                 * hardware, signal the fence now rather than wait for
                 * the signaler to be woken up. We still wake up the
                 * waiter in order to handle the irq-seqno coherency
                 * issues (we may receive the interrupt before the
                 * seqno is written, see __i915_request_irq_complete())
                 * and to handle coalescing of multiple seqno updates
                 * and many waiters.
                 */
                if (i915_seqno_passed(intel_engine_get_seqno(engine),
                                      wait->seqno) &&
                    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                              &wait->request->fence.flags))
                        rq = i915_gem_request_get(wait->request);

                wake_up_process(wait->tsk);
        } else {
                __intel_engine_disarm_breadcrumbs(engine);
        }
        spin_unlock(&engine->breadcrumbs.irq_lock);

        if (rq) {
                dma_fence_signal(&rq->fence);
                i915_gem_request_put(rq);
        }

        trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->ktime = ktime_get_raw();
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        const struct intel_rps_ei *prev = &dev_priv->rps.ei;
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);

        if (prev->ktime) {
                u64 time, c0;
                u32 render, media;

                time = ktime_us_delta(now.ktime, prev->ktime);

                time *= dev_priv->czclk_freq;

                /* Workload can be split between render + media,
                 * e.g. SwapBuffers being blitted in X after being rendered in
                 * mesa. To account for this we need to combine both engines
                 * into our activity counter.
                 */
                render = now.render_c0 - prev->render_c0;
                media = now.media_c0 - prev->media_c0;
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

                if (c0 > time * dev_priv->rps.up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
                else if (c0 < time * dev_priv->rps.down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }

        dev_priv->rps.ei = now;
        return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, dev_priv, id)
                if (intel_engine_has_waiter(engine))
                        return true;

        return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, rps.work);
        bool client_boost = false;
        int new_delay, adj, min, max;
        u32 pm_iir = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->rps.interrupts_enabled) {
                pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
                client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                goto out;

        mutex_lock(&dev_priv->rps.hw_lock);

        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

        adj = dev_priv->rps.last_adj;
        new_delay = dev_priv->rps.cur_freq;
        min = dev_priv->rps.min_freq_softlimit;
        max = dev_priv->rps.max_freq_softlimit;
        if (client_boost || any_waiters(dev_priv))
                max = dev_priv->rps.max_freq;
        if (client_boost && new_delay < dev_priv->rps.boost_freq) {
                new_delay = dev_priv->rps.boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

                if (new_delay >= dev_priv->rps.max_freq_softlimit)
                        adj = 0;
        } else if (client_boost || any_waiters(dev_priv)) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
                else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

                if (new_delay <= dev_priv->rps.min_freq_softlimit)
                        adj = 0;
        } else { /* unknown event */
                adj = 0;
        }

        dev_priv->rps.last_adj = adj;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);

        if (intel_set_rps(dev_priv, new_delay)) {
                DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
                dev_priv->rps.last_adj = 0;
        }

        mutex_unlock(&dev_priv->rps.hw_lock);

out:
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->rps.interrupts_enabled)
                gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

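/*
 * Per-engine CS interrupt dispatch for gen8+: a context switch interrupt
 * kicks the execlists tasklet, while a user interrupt wakes up waiters
 * (and, when GuC submission is enabled, also goes through the tasklet).
 */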
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	bool tasklet = false;

	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
		set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
		tasklet = true;
	}

	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
		notify_ring(engine);
		tasklet |= i915.enable_guc_submission;
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->irq_tasklet);
}

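/*
 * Ack (read and clear) the banked GT IIR registers selected by master_ctl,
 * stashing the raw values in gt_iir[] so that the actual processing can be
 * deferred until after the master interrupt is re-enabled. The raw _FW
 * accessors skip the uncore lock and forcewake handling, which is
 * presumably safe for these interrupt registers.
 */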
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & (dev_priv->pm_rps_events |
				 dev_priv->pm_guc_events)) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & (dev_priv->pm_rps_events |
						   dev_priv->pm_guc_events));
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);

	if (gt_iir[2] & dev_priv->pm_guc_events)
		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}

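/*
 * The *_long_detect() helpers below classify a hotplug pulse as long
 * (plug or unplug) or short (e.g. a DP short pulse requesting link
 * retraining), based on the platform-specific status bits for each port.
 */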
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

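/*
 * Pipe CRC handling: with CONFIG_DEBUG_FS enabled the CRC results either go
 * into the legacy i915 debugfs ring buffer (when a source was selected via
 * the i915-specific interface) or into the generic drm_crtc CRC machinery.
 */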
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct drm_driver *driver = dev_priv->drm.driver;
	uint32_t crcs[5];
	int head, tail;

	spin_lock(&pipe_crc->lock);
	if (pipe_crc->source) {
		if (!pipe_crc->entries) {
			spin_unlock(&pipe_crc->lock);
			DRM_DEBUG_KMS("spurious interrupt\n");
			return;
		}

		head = pipe_crc->head;
		tail = pipe_crc->tail;

		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
			spin_unlock(&pipe_crc->lock);
			DRM_ERROR("CRC buffer overflowing\n");
			return;
		}

		entry = &pipe_crc->entries[head];

		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
		entry->crc[0] = crc0;
		entry->crc[1] = crc1;
		entry->crc[2] = crc2;
		entry->crc[3] = crc3;
		entry->crc[4] = crc4;

		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		pipe_crc->head = head;

		spin_unlock(&pipe_crc->lock);

		wake_up_interruptible(&pipe_crc->wq);
	} else {
		/*
		 * For some not yet identified reason, the first CRC is
		 * bonkers. So let's just wait for the next vblank and read
		 * out the buggy result.
		 *
		 * On CHV sometimes the second CRC is bonkers as well, so
		 * don't trust that one either.
		 */
		if (pipe_crc->skipped == 0 ||
		    (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
			pipe_crc->skipped++;
			spin_unlock(&pipe_crc->lock);
			return;
		}
		spin_unlock(&pipe_crc->lock);

		crcs[0] = crc0;
		crcs[1] = crc1;
		crcs[2] = crc2;
		crcs[3] = crc3;
		crcs[4] = crc4;
		drm_crtc_add_crc_entry(&crtc->base, true,
				       drm_accurate_vblank_count(&crtc->base),
				       crcs);
	}
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out
		 * immediately from the message identity register to minimize
		 * the probability of losing a flush interrupt, when there are
		 * back to back flush interrupts.
		 * There can be a new flush interrupt, for a different log
		 * buffer type (like for ISR), whilst Host is handling one
		 * (for DPC). Since the same bit is used in the message
		 * register for ISR & DPC, it could happen that GuC sets the
		 * bit for the 2nd interrupt but Host clears out the bit on
		 * handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits set will not cause
			 * the interrupt to be re-triggered.
			 */
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

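/*
 * Read and ack the per-pipe PIPESTAT registers under irq_lock, keeping only
 * the status bits we actually intend to handle; the acked values are
 * returned in pipe_stats[] for processing after the locks are dropped.
 */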
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}

	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

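/*
 * Read and clear PORT_HOTPLUG_STAT; the returned status is decoded later,
 * after the IIR ack, by i9xx_hpd_irq_handler().
 */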
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);

		gen8_gt_irq_handler(dev_priv, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clearing CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev_priv);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			intel_finish_page_flip_cs(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clearing CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev_priv)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 6)
			snb_gt_irq_handler(dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_GEN(dev_priv) >= 7)
			ivb_display_irq_handler(dev_priv, de_iir);
		else
			ilk_display_irq_handler(dev_priv, de_iir);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev_priv)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

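/*
 * Fan out the gen8+ display engine interrupts: MISC (OpRegion/GSE), PORT
 * (AUX and hotplug), the per-pipe IIRs, and, on PCH-split platforms, the
 * south display engine interrupts forwarded through SDEIIR.
 */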
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev_priv);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 flip_done, fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;

		if (flip_done)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 master_ctl;
	u32 gt_iir[4] = {};
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
	gen8_gt_irq_handler(dev_priv, gt_iir);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev_priv: i915 device private
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
{
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	intel_prepare_reset(dev_priv);

	set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	do {
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
			i915_reset(dev_priv);
			mutex_unlock(&dev_priv->drm.struct_mutex);
		}

		/* We need to wait for anyone holding the lock to wakeup */
	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
				     I915_RESET_HANDOFF,
				     TASK_UNINTERRUPTIBLE,
				     HZ));

	intel_finish_reset(dev_priv);

	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		kobject_uevent_env(kobj,
				   KOBJ_CHANGE, reset_done_event);

	/*
	 * Note: The wake_up also serves as a memory barrier so that
	 * waiters see the updated value of the dev_priv->gpu_error.
	 */
	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.reset_queue);
}

static inline void
i915_err_print_instdone(struct drm_i915_private *dev_priv,
			struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	pr_err("  INSTDONE: 0x%08x\n", instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	pr_err("  SC_INSTDONE: 0x%08x\n", instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		pr_err("  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
		       slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		pr_err("  ROW_INSTDONE[%d][%d]: 0x%08x\n",
		       slice, subslice, instdone->row[slice][subslice]);
}

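/*
 * Clear the sticky error registers (PGTBL_ER, IPEIR, EIR) after an error
 * has been captured; any bits still set in EIR afterwards are masked via
 * EMR so they cannot keep re-asserting the error interrupt.
 */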
static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
	u32 eir;

	if (!IS_GEN2(dev_priv))
		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));

	if (INTEL_GEN(dev_priv) < 4)
		I915_WRITE(IPEIR, I915_READ(IPEIR));
	else
		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));

	I915_WRITE(EIR, I915_READ(EIR));
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev_priv: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...)
{
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	intel_runtime_pm_get(dev_priv);

	i915_capture_error_state(dev_priv, engine_mask, error_msg);
	i915_clear_error_registers(dev_priv);

	if (!engine_mask)
		goto out;

	if (test_and_set_bit(I915_RESET_BACKOFF,
			     &dev_priv->gpu_error.flags))
		goto out;

	i915_reset_and_wakeup(dev_priv);

out:
	intel_runtime_pm_put(dev_priv);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;
	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

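/*
 * GEN5_IRQ_RESET() and the related GEN5_IRQ_INIT()/GEN8_IRQ_RESET_NDX()
 * helpers used below are macros defined earlier in this file; they are
 * assumed to roughly mask all bits in IMR, zero IER and clear IIR (twice,
 * with posting reads) for the given register block.
 */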
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN5_IRQ_RESET(GT);
	if (INTEL_GEN(dev_priv) >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPE_FIFO_UNDERRUN_STATUS |
			   PIPESTAT_INT_STATUS_MASK);
		dev_priv->pipestat_irq_mask[pipe] = 0;
	}

	GEN5_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0);

	dev_priv->irq_mask = ~enable_mask;

	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev_priv))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev_priv);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

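/*
 * Reset all four gen8 GT interrupt banks: 0 = RCS/BCS, 1 = VCS1/VCS2,
 * 2 = PM, 3 = VECS, matching the layout programmed in
 * gen8_gt_irq_postinstall().
 */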
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

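/*
 * The DE_PIPE interrupt registers lose their state when the display power
 * well goes down, so they are reprogrammed after the well comes back up
 * and reset again just before it is turned off.
 */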
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

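/*
 * Collect the HPD interrupt bits for all encoders whose hotplug pin is
 * currently enabled, using the platform's pin -> irq bit table.
 */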
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;

	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT the invert bit has to be set based on the AOB design
	 * for HPD detection logic; update it based on the VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

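/*
 * Unmask the PCH interrupts we always want (GMBUS, AUX, and the poison
 * notification on IBX) and program hotplug detection for the PCH type.
 */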
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev_priv)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_GEN(dev_priv) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev_priv)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->pm_imr = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
	}
}

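/*
 * Postinstall hook for the PCH-split platforms (ILK through HSW): pick the
 * gen-specific display engine masks, then bring up the GT and PCH interrupt
 * blocks in order.
 */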
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked check happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

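/*
 * Program the gen8+ display engine interrupt registers: the per-pipe masks
 * (only for pipes whose power well is currently on), the port bank for
 * AUX/hotplug, and the misc bank for GSE.
 */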
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_GEN9_LP(dev_priv))
		bxt_hpd_detection_setup(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		ilk_hpd_detection_setup(dev_priv);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

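/*
 * Gen2 exposes 16-bit IMR/IER/IIR registers, hence the I915_WRITE16()/
 * I915_READ16() accessors in the i8xx paths below.
 */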
static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
			       int plane, int pipe, u32 iir)
{
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev_priv, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip_cs(dev_priv, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev_priv, pipe);
	return false;
}

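/*
 * Gen3 interrupt handler: loop, acking IIR and the latched pipe statuses
 * on each pass, until no unmasked IIR bits remain.
 */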
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection; note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang (and VLV, CHV may) on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7)
		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}