i915_irq.c

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;
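
	/*
	 * A bit set in GTIMR masks (disables) that interrupt: clear the
	 * bits being updated, then re-set those not in the enabled mask.
	 */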
	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
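		/*
		 * The mask keeps the enable bits (high word), where the FIFO
		 * underrun status bit also lives; the low status bits are
		 * write-1-to-clear, so dropping them means the write below
		 * clears only the underrun status.
		 */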
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, because there is only one interrupt mask/enable bit
 * for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, because there is only one
 * interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return old;
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;
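
	/*
	 * Also track the enabled status bits in software; the pipestat IRQ
	 * handling uses this mask to decide which events it cares about.
	 */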
	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;
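	/* In PIPESTAT, each enable bit sits 16 bits above its status bit. */
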
	/*
	 * On pipe A we don't support the PSR interrupt yet; on pipes B and
	 * C the same bit must be zero (MBZ).
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
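	/* The 'false' here means the probe was not forced by userspace. */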
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	unsigned long irqflags;
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
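
	/*
	 * The masks were snapshotted and cleared under the lock, so the
	 * interrupt handler can queue new events while we process these.
	 */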
	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret) {
				/* hpd_pulse returned true: fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
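/* Delay (in ms, i.e. two minutes) before re-enabling HPD pins disabled by the storm detector. */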
  959. #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
  960. static void i915_hotplug_work_func(struct work_struct *work)
  961. {
  962. struct drm_i915_private *dev_priv =
  963. container_of(work, struct drm_i915_private, hotplug_work);
  964. struct drm_device *dev = dev_priv->dev;
  965. struct drm_mode_config *mode_config = &dev->mode_config;
  966. struct intel_connector *intel_connector;
  967. struct intel_encoder *intel_encoder;
  968. struct drm_connector *connector;
  969. unsigned long irqflags;
  970. bool hpd_disabled = false;
  971. bool changed = false;
  972. u32 hpd_event_bits;
  973. mutex_lock(&mode_config->mutex);
  974. DRM_DEBUG_KMS("running encoder hotplug functions\n");
  975. spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  976. hpd_event_bits = dev_priv->hpd_event_bits;
  977. dev_priv->hpd_event_bits = 0;
  978. list_for_each_entry(connector, &mode_config->connector_list, head) {
  979. intel_connector = to_intel_connector(connector);
  980. if (!intel_connector->encoder)
  981. continue;
  982. intel_encoder = intel_connector->encoder;
  983. if (intel_encoder->hpd_pin > HPD_NONE &&
  984. dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
  985. connector->polled == DRM_CONNECTOR_POLL_HPD) {
  986. DRM_INFO("HPD interrupt storm detected on connector %s: "
  987. "switching from hotplug detection to polling\n",
  988. connector->name);
  989. dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
  990. connector->polled = DRM_CONNECTOR_POLL_CONNECT
  991. | DRM_CONNECTOR_POLL_DISCONNECT;
  992. hpd_disabled = true;
  993. }
  994. if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
  995. DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
  996. connector->name, intel_encoder->hpd_pin);
  997. }
  998. }
  999. /* if there were no outputs to poll, poll was disabled,
  1000. * therefore make sure it's enabled when disabling HPD on
  1001. * some connectors */
  1002. if (hpd_disabled) {
  1003. drm_kms_helper_poll_enable(dev);
  1004. mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
  1005. msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
  1006. }
  1007. spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  1008. list_for_each_entry(connector, &mode_config->connector_list, head) {
  1009. intel_connector = to_intel_connector(connector);
  1010. if (!intel_connector->encoder)
  1011. continue;
  1012. intel_encoder = intel_connector->encoder;
  1013. if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
  1014. if (intel_encoder->hot_plug)
  1015. intel_encoder->hot_plug(intel_encoder);
  1016. if (intel_hpd_irq_event(dev, connector))
  1017. changed = true;
  1018. }
  1019. }
  1020. mutex_unlock(&mode_config->mutex);
  1021. if (changed)
  1022. drm_kms_helper_hotplug_event(dev);
  1023. }
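
/*
 * Step the legacy IPS GPU frequency one notch towards whatever the
 * hardware's busy-up/busy-down counters ask for. Note that a smaller
 * delay value means a higher frequency, so max_delay is the lower
 * bound of the delay range and min_delay the upper.
 */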
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
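
/*
 * Sample the render/media C0 counters against the CZ timestamp and
 * return the busier well's C0 residency as a percentage of the
 * elapsed interval. The first call only primes the saved counters
 * and returns the current frequency instead.
 */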
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into a common unit of milliseconds */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non-zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 */
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	u8 new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
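
/*
 * Bottom half for the RPS up/down-threshold interrupts: pick a new
 * frequency from the latched event bits, clamp it to the softlimits
 * (sysfs may have changed them while we were queued), and program it.
 */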
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
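
/*
 * Mask further parity interrupts, note which slice(s) reported the
 * error, and defer the register dance to the work item above (it
 * needs struct_mutex, which we cannot take in interrupt context).
 */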
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
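
/*
 * Walk the four GT IIR banks that the master control register flagged:
 * ack each bank first, then fan the bits out to the rings (or to the
 * RPS handler for the PM bank). A flagged bank with an empty IIR means
 * the master control register lied to us.
 */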
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
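
/*
 * HPD storm detection: more than HPD_STORM_THRESHOLD interrupts on one
 * pin within HPD_STORM_DETECT_PERIOD milliseconds is treated as a storm
 * and the pin is demoted to polling.
 */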
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int ilk_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int g4x_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
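
/*
 * Core HPD dispatch, called with the raw trigger bits from the irq
 * handlers. For ports with a hpd_pulse() handler it classifies each
 * event as a long or short pulse and queues the digital-port work;
 * everything else feeds the storm detector and, if the pin is still
 * enabled, the legacy hotplug work.
 */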
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (IS_G4X(dev)) {
				dig_shift = g4x_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = ilk_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
			/* For long HPD pulses we want to have the digital queue
			 * happen, but we still want HPD storm detection to
			 * function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits themselves. So only WARN about
			 * unexpected interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
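
/* DP AUX transfers share the GMBUS wait queue, so a completed AUX
 * transaction wakes the same set of waiters. */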
static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	return drm_handle_vblank(dev, pipe);
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
		mask = 0;
		if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
			mask |= PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_pipe_handle_vblank(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or
		 * else we may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_ERROR("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_ERROR("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}
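
/*
 * South (PCH) error interrupts on CPT: report poison and per-transcoder
 * FIFO underruns, then ack the whole SERR_INT register.
 */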
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_ERROR("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_ERROR("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_ERROR("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_pipe_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			intel_pipe_handle_vblank(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
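
/*
 * Gen8 top-level handler: disable master interrupt control, then walk
 * the GT, DE misc/port, per-pipe and PCH sources that the master
 * control register flagged, acking each IIR before processing it.
 */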
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_AUX_CHANNEL_A)
				dp_aux_irq_handler(dev);
			else
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(pipe) {
		uint32_t pipe_iir;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
			if (pipe_iir & GEN8_PIPE_VBLANK)
				intel_pipe_handle_vblank(dev, pipe);

			if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
				if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
									  false))
					DRM_ERROR("Pipe %c FIFO underrun\n",
						  pipe_name(pipe));
			}

			if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
			}
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	struct drm_i915_private *dev_priv =
		container_of(error, struct drm_i915_private, gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}
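
/*
 * Heuristic stall check: if the display base register already points at
 * the pending flip's buffer, assume the flip-done interrupt was missed
 * and complete the flip by hand. Currently unused (__always_unused).
 */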
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->primary->fb->pitches[0] +
							crtc->x * crtc->primary->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

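/*
 * Hangcheck helpers. A ring counts as idle when its request list is
 * empty or when the hardware seqno has caught up with the last request
 * that was emitted to it.
 */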
static u32
ring_last_seqno(struct intel_engine_cs *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

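/*
 * Map a stuck semaphore wait back to the engine that is supposed to
 * signal it: on gen8+ match the GGTT address of the semaphore slot,
 * on earlier gens match the MBOX sync-register selector bits.
 */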
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		/* OR in the low dword; plain assignment would discard the
		 * high half read above. */
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

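/*
 * Returns 1 if the semaphore this ring is stuck on has already been
 * signalled, 0 if it is still pending, and -1 if the state cannot be
 * resolved (no signaller found, or a potential semaphore deadlock).
 */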
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of seqno progress per
 * ring, and if there is no progress the hangcheck score for that ring
 * is increased. Further, acthd is inspected to see if the ring is
 * stuck, in which case we kick the ring. If we see no progress on
 * three subsequent calls we assume the chip is wedged and try to fix
 * it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * this ring is in a legitimate wait for
				 * another ring. In that case the waiting
				 * ring is a victim and we want to be sure we
				 * catch the right culprit. Then every time we
				 * do kick the ring, add a small increment to
				 * the score so that we can catch a batch that
				 * is being repeatedly kicked and so
				 * responsible for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer in case chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}

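/* Re-arm the hangcheck timer to fire one full period from now. */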
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!i915.enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}

static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

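/*
 * On gen8 the GT interrupts are spread across four IIR/IMR/IER register
 * banks (0-3); reset each bank in turn.
 */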
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(pipe)
		if (intel_display_power_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	ibx_irq_reset(dev);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
			  ~dev_priv->de_irq_mask[PIPE_B]);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
			  ~dev_priv->de_irq_mask[PIPE_C]);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);
	POSTING_READ(GEN8_PCU_IIR);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IIR);
}

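/*
 * Select the PCH hotplug source bits (IBX vs CPT/PPT register layout)
 * for every encoder whose HPD pin is currently marked enabled, then
 * program the shared enable and pulse-duration bits.
 */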
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= dev_priv->pm_rps_events;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}

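/*
 * Unmask the VLV display-side interrupts and enable the pipestat events
 * that feed them. Callers hold dev_priv->irq_lock (or run during
 * single-threaded interrupt setup), so the pipestat updates are safe.
 */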
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
					       PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	POSTING_READ(VLV_IER);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
						PIPE_GMBUS_INTERRUPT_STATUS);
	i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (dev_priv->dev->irq_enabled)
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (dev_priv->dev->irq_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

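/*
 * gt_interrupts[] is indexed by GT IIR bank: bank 0 covers the render
 * and blitter rings, bank 1 the two video rings, and bank 3 the VEBOX.
 * Bank 2 (PM/RPS) is deliberately left fully masked here and managed
 * via dev_priv->pm_irq_mask instead.
 */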
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	int i;
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
		GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);

	dev_priv->pm_irq_mask = 0xffffffff;
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
		GEN8_PIPE_CDCLK_CRC_DONE |
		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
		GEN8_PIPE_FIFO_UNDERRUN;
	int pipe;
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(pipe)
		if (intel_display_power_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
		PIPE_CRC_DONE_INTERRUPT_STATUS;
	unsigned long irqflags;
	int pipe;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = ~enable_mask;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	dev_priv->irq_mask = 0;

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

#define GEN8_IRQ_FINI_NDX(type, which)				\
do {								\
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff);	\
	I915_WRITE(GEN8_##type##_IER(which), 0);		\
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);	\
	POSTING_READ(GEN8_##type##_IIR(which));			\
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff);	\
} while (0)

#define GEN8_IRQ_FINI(type)				\
do {							\
	I915_WRITE(GEN8_##type##_IMR, 0xffffffff);	\
	I915_WRITE(GEN8_##type##_IER, 0);		\
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff);	\
	POSTING_READ(GEN8_##type##_IIR);		\
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff);	\
} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	GEN8_IRQ_FINI(PCU);

#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IIR);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

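/*
 * Gen2 interrupt handler: IIR/IMR/IER are only 16 bits wide on these
 * chipsets, hence the I915_READ16/I915_WRITE16 accessors throughout.
 */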
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

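/*
 * Program PORT_HOTPLUG_EN for every encoder whose HPD pin is enabled.
 * Caller must hold dev_priv->irq_lock (see the assert below).
 */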
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

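/*
 * Delayed work that re-enables hotplug pins previously marked
 * HPD_DISABLED (e.g. after an interrupt storm) and restores each
 * affected connector's polling mode.
 */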
static void intel_hpd_irq_reenable(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}
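/*
 * One-time IRQ setup at driver load: initializes the deferred work
 * items and the hangcheck timer, then selects the vblank counter and
 * the IRQ entry points matching the hardware generation. The handler
 * itself is installed later, hence pm._irqs_disabled below.
 */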
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Haven't installed the IRQ handler yet */
	dev_priv->pm._irqs_disabled = true;

	if (IS_GEN2(dev)) {
		/* Gen2 has no hardware frame counter */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}
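	/*
	 * Wire up the platform-specific entry points, most specific
	 * platform first so that e.g. Cherryview is matched before the
	 * generic gen8 branch.
	 */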
	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
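/*
 * (Re)enables hotplug support: resets the per-pin storm statistics,
 * restores each connector's polling mode and programs the hotplug
 * hardware. Used both at driver load and when bringing the device back
 * up after suspend.
 */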
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
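	/*
	 * Connectors with a working HPD pin (and all MST ports) rely on
	 * hotplug events instead of periodic output polling.
	 */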
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);

		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled &&
		    I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}