i915_debugfs.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;
	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}
	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;
	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);
	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);
	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON
	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}

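/*
 * describe_obj() prints a one-line summary of a GEM object. The
 * single-character flags come from the helpers above: 'p' pinned,
 * 'X'/'Y' tiling mode, 'g' bound in the global GTT; a space means the
 * flag does not apply.
 */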
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;
	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}

static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);
	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);
	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;
		list_add(&obj->obj_exec_link, &stolen);
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;
		list_add(&obj->obj_exec_link, &stolen);
		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);
	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};

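/*
 * per_file_stats() is the idr_for_each() callback used by
 * i915_gem_object_info() below: it runs once per GEM handle owned by a
 * client and accumulates that object's size into the client's
 * struct file_stats buckets (total/active/inactive/global/shared/unbound).
 */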
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;
	stats->count++;
	stats->total += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;
	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;
			if (!drm_mm_node_allocated(&vma->node))
				continue;
			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}
			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;
			if (obj->ring) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->ring)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}
	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;
	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);
	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);
	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);
	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);
	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);
	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;
		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.global,
			   stats.shared,
			   stats.unbound);
		rcu_read_unlock();
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;
		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	mutex_unlock(&dev->struct_mutex);
	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;
		spin_lock_irq(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_ring) {
				seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
					   work->flip_queued_ring->name,
					   work->flip_queued_seqno,
					   dev_priv->next_seqno,
					   work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
					   i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
							     work->flip_queued_seqno));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_vblank_count(dev, crtc->pipe));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);
			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;
		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);
	if (count == 0)
		seq_puts(m, "No requests\n");
	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}
		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));
		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}
		for_each_pipe(dev_priv, pipe) {
			if (!intel_display_power_is_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe))) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}
		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));
		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));
		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));
		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));
		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;
	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;
	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;
	DRM_DEBUG_DRIVER("Resetting error state\n");
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);
	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;
	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;
	error_priv->dev = dev;
	i915_error_state_get(dev, error_priv);
	file->private_data = error_priv;
	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	i915_error_state_put(error_priv);
	kfree(error_priv);
	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;
	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;
	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;
	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);
	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

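/*
 * i915_frequency_info() reports the current GPU frequency/RPS state.
 * The relevant registers differ per platform, hence the separate
 * branches for Ironlake (gen5), Sandybridge..Broadwell and Valleyview.
 */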
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	intel_runtime_pm_get(dev_priv);
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
		   IS_BROADWELL(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;
		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;
		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;
		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;
		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);
		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);
		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);
		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts;
		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}
out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

  1000. static int ironlake_drpc_info(struct seq_file *m)
  1001. {
  1002. struct drm_info_node *node = m->private;
  1003. struct drm_device *dev = node->minor->dev;
  1004. struct drm_i915_private *dev_priv = dev->dev_private;
  1005. u32 rgvmodectl, rstdbyctl;
  1006. u16 crstandvid;
  1007. int ret;
  1008. ret = mutex_lock_interruptible(&dev->struct_mutex);
  1009. if (ret)
  1010. return ret;
  1011. intel_runtime_pm_get(dev_priv);
  1012. rgvmodectl = I915_READ(MEMMODECTL);
  1013. rstdbyctl = I915_READ(RSTDBYCTL);
  1014. crstandvid = I915_READ16(CRSTANDVID);
  1015. intel_runtime_pm_put(dev_priv);
  1016. mutex_unlock(&dev->struct_mutex);
  1017. seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
  1018. "yes" : "no");
  1019. seq_printf(m, "Boost freq: %d\n",
  1020. (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
  1021. MEMMODE_BOOST_FREQ_SHIFT);
  1022. seq_printf(m, "HW control enabled: %s\n",
  1023. rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
  1024. seq_printf(m, "SW control enabled: %s\n",
  1025. rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
  1026. seq_printf(m, "Gated voltage change: %s\n",
  1027. rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
  1028. seq_printf(m, "Starting frequency: P%d\n",
  1029. (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
  1030. seq_printf(m, "Max P-state: P%d\n",
  1031. (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
  1032. seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
  1033. seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
  1034. seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
  1035. seq_printf(m, "Render standby enabled: %s\n",
  1036. (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
  1037. seq_puts(m, "Current RS state: ");
  1038. switch (rstdbyctl & RSX_STATUS_MASK) {
  1039. case RSX_STATUS_ON:
  1040. seq_puts(m, "on\n");
  1041. break;
  1042. case RSX_STATUS_RC1:
  1043. seq_puts(m, "RC1\n");
  1044. break;
  1045. case RSX_STATUS_RC1E:
  1046. seq_puts(m, "RC1E\n");
  1047. break;
  1048. case RSX_STATUS_RS1:
  1049. seq_puts(m, "RS1\n");
  1050. break;
  1051. case RSX_STATUS_RS2:
  1052. seq_puts(m, "RS2 (RC6)\n");
  1053. break;
  1054. case RSX_STATUS_RS3:
  1055. seq_puts(m, "RC3 (RC6+)\n");
  1056. break;
  1057. default:
  1058. seq_puts(m, "unknown\n");
  1059. break;
  1060. }
  1061. return 0;
  1062. }
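/*
 * vlv_drpc_info: report Valleyview RC6 and power-well status. The snapshot of
 * VLV_GTLC_PW_STATUS, GEN6_RP_CONTROL and GEN6_RC_CONTROL is taken under a
 * runtime-PM reference; the render/media forcewake counts are sampled
 * separately under the uncore spinlock.
 */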
  1063. static int vlv_drpc_info(struct seq_file *m)
  1064. {
  1065. struct drm_info_node *node = m->private;
  1066. struct drm_device *dev = node->minor->dev;
  1067. struct drm_i915_private *dev_priv = dev->dev_private;
  1068. u32 rpmodectl1, rcctl1, pw_status;
  1069. unsigned fw_rendercount = 0, fw_mediacount = 0;
  1070. intel_runtime_pm_get(dev_priv);
  1071. pw_status = I915_READ(VLV_GTLC_PW_STATUS);
  1072. rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
  1073. rcctl1 = I915_READ(GEN6_RC_CONTROL);
  1074. intel_runtime_pm_put(dev_priv);
  1075. seq_printf(m, "Video Turbo Mode: %s\n",
  1076. yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
  1077. seq_printf(m, "Turbo enabled: %s\n",
  1078. yesno(rpmodectl1 & GEN6_RP_ENABLE));
  1079. seq_printf(m, "HW control enabled: %s\n",
  1080. yesno(rpmodectl1 & GEN6_RP_ENABLE));
  1081. seq_printf(m, "SW control enabled: %s\n",
  1082. yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
  1083. GEN6_RP_MEDIA_SW_MODE));
  1084. seq_printf(m, "RC6 Enabled: %s\n",
  1085. yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
  1086. GEN6_RC_CTL_EI_MODE(1))));
  1087. seq_printf(m, "Render Power Well: %s\n",
  1088. (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
  1089. seq_printf(m, "Media Power Well: %s\n",
  1090. (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
  1091. seq_printf(m, "Render RC6 residency since boot: %u\n",
  1092. I915_READ(VLV_GT_RENDER_RC6));
  1093. seq_printf(m, "Media RC6 residency since boot: %u\n",
  1094. I915_READ(VLV_GT_MEDIA_RC6));
  1095. spin_lock_irq(&dev_priv->uncore.lock);
  1096. fw_rendercount = dev_priv->uncore.fw_rendercount;
  1097. fw_mediacount = dev_priv->uncore.fw_mediacount;
  1098. spin_unlock_irq(&dev_priv->uncore.lock);
  1099. seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
  1100. seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
  1101. return 0;
  1102. }
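/*
 * gen6_drpc_info: SNB/IVB RC-state report. RC values are only trustworthy
 * while no forcewake reference is held, hence the poll of FORCEWAKE_ACK
 * (up to 50 iterations of udelay(10)) before GEN6_GT_CORE_STATUS is sampled
 * with a raw readl() instead of the tracing register accessors.
 */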
  1103. static int gen6_drpc_info(struct seq_file *m)
  1104. {
  1105. struct drm_info_node *node = m->private;
  1106. struct drm_device *dev = node->minor->dev;
  1107. struct drm_i915_private *dev_priv = dev->dev_private;
  1108. u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
  1109. unsigned forcewake_count;
  1110. int count = 0, ret;
  1111. ret = mutex_lock_interruptible(&dev->struct_mutex);
  1112. if (ret)
  1113. return ret;
  1114. intel_runtime_pm_get(dev_priv);
  1115. spin_lock_irq(&dev_priv->uncore.lock);
  1116. forcewake_count = dev_priv->uncore.forcewake_count;
  1117. spin_unlock_irq(&dev_priv->uncore.lock);
  1118. if (forcewake_count) {
1119. seq_puts(m, "RC information inaccurate because somebody "
1120. "holds a forcewake reference\n");

  1121. } else {
  1122. /* NB: we cannot use forcewake, else we read the wrong values */
  1123. while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
  1124. udelay(10);
  1125. seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
  1126. }
  1127. gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
  1128. trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
  1129. rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
  1130. rcctl1 = I915_READ(GEN6_RC_CONTROL);
  1131. mutex_unlock(&dev->struct_mutex);
  1132. mutex_lock(&dev_priv->rps.hw_lock);
  1133. sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
  1134. mutex_unlock(&dev_priv->rps.hw_lock);
  1135. intel_runtime_pm_put(dev_priv);
  1136. seq_printf(m, "Video Turbo Mode: %s\n",
  1137. yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
  1138. seq_printf(m, "HW control enabled: %s\n",
  1139. yesno(rpmodectl1 & GEN6_RP_ENABLE));
  1140. seq_printf(m, "SW control enabled: %s\n",
  1141. yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
  1142. GEN6_RP_MEDIA_SW_MODE));
  1143. seq_printf(m, "RC1e Enabled: %s\n",
  1144. yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
  1145. seq_printf(m, "RC6 Enabled: %s\n",
  1146. yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
  1147. seq_printf(m, "Deep RC6 Enabled: %s\n",
  1148. yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
  1149. seq_printf(m, "Deepest RC6 Enabled: %s\n",
  1150. yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
  1151. seq_puts(m, "Current RC state: ");
  1152. switch (gt_core_status & GEN6_RCn_MASK) {
  1153. case GEN6_RC0:
  1154. if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
  1155. seq_puts(m, "Core Power Down\n");
  1156. else
  1157. seq_puts(m, "on\n");
  1158. break;
  1159. case GEN6_RC3:
  1160. seq_puts(m, "RC3\n");
  1161. break;
  1162. case GEN6_RC6:
  1163. seq_puts(m, "RC6\n");
  1164. break;
  1165. case GEN6_RC7:
  1166. seq_puts(m, "RC7\n");
  1167. break;
  1168. default:
  1169. seq_puts(m, "Unknown\n");
  1170. break;
  1171. }
  1172. seq_printf(m, "Core Power Down: %s\n",
  1173. yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
  1174. /* Not exactly sure what this is */
  1175. seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
  1176. I915_READ(GEN6_GT_GFX_RC6_LOCKED));
  1177. seq_printf(m, "RC6 residency since boot: %u\n",
  1178. I915_READ(GEN6_GT_GFX_RC6));
  1179. seq_printf(m, "RC6+ residency since boot: %u\n",
  1180. I915_READ(GEN6_GT_GFX_RC6p));
  1181. seq_printf(m, "RC6++ residency since boot: %u\n",
  1182. I915_READ(GEN6_GT_GFX_RC6pp));
  1183. seq_printf(m, "RC6 voltage: %dmV\n",
  1184. GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
  1185. seq_printf(m, "RC6+ voltage: %dmV\n",
  1186. GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
  1187. seq_printf(m, "RC6++ voltage: %dmV\n",
  1188. GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
  1189. return 0;
  1190. }
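/*
 * i915_drpc_info: debugfs entry point that dispatches to the platform
 * specific helper above (VLV, gen6+, or Ironlake). It is typically read from
 * userspace as /sys/kernel/debug/dri/<minor>/i915_drpc_info, assuming the
 * usual debugfs mount point and that the node is registered in the debugfs
 * list elsewhere in this file.
 */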
  1191. static int i915_drpc_info(struct seq_file *m, void *unused)
  1192. {
  1193. struct drm_info_node *node = m->private;
  1194. struct drm_device *dev = node->minor->dev;
  1195. if (IS_VALLEYVIEW(dev))
  1196. return vlv_drpc_info(m);
  1197. else if (INTEL_INFO(dev)->gen >= 6)
  1198. return gen6_drpc_info(m);
  1199. else
  1200. return ironlake_drpc_info(m);
  1201. }
  1202. static int i915_fbc_status(struct seq_file *m, void *unused)
  1203. {
  1204. struct drm_info_node *node = m->private;
  1205. struct drm_device *dev = node->minor->dev;
  1206. struct drm_i915_private *dev_priv = dev->dev_private;
  1207. if (!HAS_FBC(dev)) {
  1208. seq_puts(m, "FBC unsupported on this chipset\n");
  1209. return 0;
  1210. }
  1211. intel_runtime_pm_get(dev_priv);
  1212. if (intel_fbc_enabled(dev)) {
  1213. seq_puts(m, "FBC enabled\n");
  1214. } else {
  1215. seq_puts(m, "FBC disabled: ");
  1216. switch (dev_priv->fbc.no_fbc_reason) {
  1217. case FBC_OK:
1218. seq_puts(m, "FBC activated, but currently disabled in hardware");
  1219. break;
  1220. case FBC_UNSUPPORTED:
  1221. seq_puts(m, "unsupported by this chipset");
  1222. break;
  1223. case FBC_NO_OUTPUT:
  1224. seq_puts(m, "no outputs");
  1225. break;
  1226. case FBC_STOLEN_TOO_SMALL:
  1227. seq_puts(m, "not enough stolen memory");
  1228. break;
  1229. case FBC_UNSUPPORTED_MODE:
  1230. seq_puts(m, "mode not supported");
  1231. break;
  1232. case FBC_MODE_TOO_LARGE:
  1233. seq_puts(m, "mode too large");
  1234. break;
  1235. case FBC_BAD_PLANE:
  1236. seq_puts(m, "FBC unsupported on plane");
  1237. break;
  1238. case FBC_NOT_TILED:
  1239. seq_puts(m, "scanout buffer not tiled");
  1240. break;
  1241. case FBC_MULTIPLE_PIPES:
  1242. seq_puts(m, "multiple pipes are enabled");
  1243. break;
  1244. case FBC_MODULE_PARAM:
  1245. seq_puts(m, "disabled per module param (default off)");
  1246. break;
  1247. case FBC_CHIP_DEFAULT:
  1248. seq_puts(m, "disabled per chip default");
  1249. break;
  1250. default:
  1251. seq_puts(m, "unknown reason");
  1252. }
  1253. seq_putc(m, '\n');
  1254. }
  1255. intel_runtime_pm_put(dev_priv);
  1256. return 0;
  1257. }
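/*
 * i915_fbc_fc_get/_set: debugfs knob for the FBC "false color" debug output
 * on gen7+ parts with FBC. The set path latches the value into
 * dev_priv->fbc.false_color and mirrors it into the FBC_CTL_FALSE_COLOR bit
 * of ILK_DPFC_CONTROL while holding the modeset locks.
 */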
  1258. static int i915_fbc_fc_get(void *data, u64 *val)
  1259. {
  1260. struct drm_device *dev = data;
  1261. struct drm_i915_private *dev_priv = dev->dev_private;
  1262. if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
  1263. return -ENODEV;
  1264. drm_modeset_lock_all(dev);
  1265. *val = dev_priv->fbc.false_color;
  1266. drm_modeset_unlock_all(dev);
  1267. return 0;
  1268. }
  1269. static int i915_fbc_fc_set(void *data, u64 val)
  1270. {
  1271. struct drm_device *dev = data;
  1272. struct drm_i915_private *dev_priv = dev->dev_private;
  1273. u32 reg;
  1274. if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
  1275. return -ENODEV;
  1276. drm_modeset_lock_all(dev);
  1277. reg = I915_READ(ILK_DPFC_CONTROL);
  1278. dev_priv->fbc.false_color = val;
  1279. I915_WRITE(ILK_DPFC_CONTROL, val ?
  1280. (reg | FBC_CTL_FALSE_COLOR) :
  1281. (reg & ~FBC_CTL_FALSE_COLOR));
  1282. drm_modeset_unlock_all(dev);
  1283. return 0;
  1284. }
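/*
 * DEFINE_SIMPLE_ATTRIBUTE generates the file_operations that parse and print
 * a single u64 using the "%llu\n" format and route reads/writes to the get
 * and set helpers above.
 */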
  1285. DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
  1286. i915_fbc_fc_get, i915_fbc_fc_set,
  1287. "%llu\n");
  1288. static int i915_ips_status(struct seq_file *m, void *unused)
  1289. {
  1290. struct drm_info_node *node = m->private;
  1291. struct drm_device *dev = node->minor->dev;
  1292. struct drm_i915_private *dev_priv = dev->dev_private;
  1293. if (!HAS_IPS(dev)) {
  1294. seq_puts(m, "not supported\n");
  1295. return 0;
  1296. }
  1297. intel_runtime_pm_get(dev_priv);
  1298. seq_printf(m, "Enabled by kernel parameter: %s\n",
  1299. yesno(i915.enable_ips));
  1300. if (INTEL_INFO(dev)->gen >= 8) {
  1301. seq_puts(m, "Currently: unknown\n");
  1302. } else {
  1303. if (I915_READ(IPS_CTL) & IPS_ENABLE)
  1304. seq_puts(m, "Currently: enabled\n");
  1305. else
  1306. seq_puts(m, "Currently: disabled\n");
  1307. }
  1308. intel_runtime_pm_put(dev_priv);
  1309. return 0;
  1310. }
  1311. static int i915_sr_status(struct seq_file *m, void *unused)
  1312. {
  1313. struct drm_info_node *node = m->private;
  1314. struct drm_device *dev = node->minor->dev;
  1315. struct drm_i915_private *dev_priv = dev->dev_private;
  1316. bool sr_enabled = false;
  1317. intel_runtime_pm_get(dev_priv);
  1318. if (HAS_PCH_SPLIT(dev))
  1319. sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
  1320. else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
  1321. sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
  1322. else if (IS_I915GM(dev))
  1323. sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
  1324. else if (IS_PINEVIEW(dev))
  1325. sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
  1326. intel_runtime_pm_put(dev_priv);
  1327. seq_printf(m, "self-refresh: %s\n",
  1328. sr_enabled ? "enabled" : "disabled");
  1329. return 0;
  1330. }
  1331. static int i915_emon_status(struct seq_file *m, void *unused)
  1332. {
  1333. struct drm_info_node *node = m->private;
  1334. struct drm_device *dev = node->minor->dev;
  1335. struct drm_i915_private *dev_priv = dev->dev_private;
  1336. unsigned long temp, chipset, gfx;
  1337. int ret;
  1338. if (!IS_GEN5(dev))
  1339. return -ENODEV;
  1340. ret = mutex_lock_interruptible(&dev->struct_mutex);
  1341. if (ret)
  1342. return ret;
  1343. temp = i915_mch_val(dev_priv);
  1344. chipset = i915_chipset_val(dev_priv);
  1345. gfx = i915_gfx_val(dev_priv);
  1346. mutex_unlock(&dev->struct_mutex);
  1347. seq_printf(m, "GMCH temp: %ld\n", temp);
  1348. seq_printf(m, "Chipset power: %ld\n", chipset);
  1349. seq_printf(m, "GFX power: %ld\n", gfx);
  1350. seq_printf(m, "Total power: %ld\n", chipset + gfx);
  1351. return 0;
  1352. }
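/*
 * i915_ring_freq_table: print the pcode-provided mapping from each GPU
 * frequency step to the effective CPU and ring frequencies. Per the decode
 * below, bits 7:0 of the returned ia_freq are the effective CPU frequency and
 * bits 15:8 the ring frequency, both in 100 MHz units, while the GPU step is
 * scaled by GT_FREQUENCY_MULTIPLIER. Illustrative (made-up) value: an ia_freq
 * of 0x1214 would print as 2000 MHz CPU / 1800 MHz ring.
 */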
  1353. static int i915_ring_freq_table(struct seq_file *m, void *unused)
  1354. {
  1355. struct drm_info_node *node = m->private;
  1356. struct drm_device *dev = node->minor->dev;
  1357. struct drm_i915_private *dev_priv = dev->dev_private;
  1358. int ret = 0;
  1359. int gpu_freq, ia_freq;
  1360. if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
  1361. seq_puts(m, "unsupported on this chipset\n");
  1362. return 0;
  1363. }
  1364. intel_runtime_pm_get(dev_priv);
  1365. flush_delayed_work(&dev_priv->rps.delayed_resume_work);
  1366. ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
  1367. if (ret)
  1368. goto out;
  1369. seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
  1370. for (gpu_freq = dev_priv->rps.min_freq_softlimit;
  1371. gpu_freq <= dev_priv->rps.max_freq_softlimit;
  1372. gpu_freq++) {
  1373. ia_freq = gpu_freq;
  1374. sandybridge_pcode_read(dev_priv,
  1375. GEN6_PCODE_READ_MIN_FREQ_TABLE,
  1376. &ia_freq);
  1377. seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
  1378. gpu_freq * GT_FREQUENCY_MULTIPLIER,
  1379. ((ia_freq >> 0) & 0xff) * 100,
  1380. ((ia_freq >> 8) & 0xff) * 100);
  1381. }
  1382. mutex_unlock(&dev_priv->rps.hw_lock);
  1383. out:
  1384. intel_runtime_pm_put(dev_priv);
  1385. return ret;
  1386. }
  1387. static int i915_opregion(struct seq_file *m, void *unused)
  1388. {
  1389. struct drm_info_node *node = m->private;
  1390. struct drm_device *dev = node->minor->dev;
  1391. struct drm_i915_private *dev_priv = dev->dev_private;
  1392. struct intel_opregion *opregion = &dev_priv->opregion;
  1393. void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
  1394. int ret;
  1395. if (data == NULL)
  1396. return -ENOMEM;
  1397. ret = mutex_lock_interruptible(&dev->struct_mutex);
  1398. if (ret)
  1399. goto out;
  1400. if (opregion->header) {
  1401. memcpy_fromio(data, opregion->header, OPREGION_SIZE);
  1402. seq_write(m, data, OPREGION_SIZE);
  1403. }
  1404. mutex_unlock(&dev->struct_mutex);
  1405. out:
  1406. kfree(data);
  1407. return 0;
  1408. }
  1409. static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
  1410. {
  1411. struct drm_info_node *node = m->private;
  1412. struct drm_device *dev = node->minor->dev;
  1413. struct intel_fbdev *ifbdev = NULL;
  1414. struct intel_framebuffer *fb;
  1415. #ifdef CONFIG_DRM_I915_FBDEV
  1416. struct drm_i915_private *dev_priv = dev->dev_private;
  1417. ifbdev = dev_priv->fbdev;
  1418. fb = to_intel_framebuffer(ifbdev->helper.fb);
  1419. seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
  1420. fb->base.width,
  1421. fb->base.height,
  1422. fb->base.depth,
  1423. fb->base.bits_per_pixel,
  1424. atomic_read(&fb->base.refcount.refcount));
  1425. describe_obj(m, fb->obj);
  1426. seq_putc(m, '\n');
  1427. #endif
  1428. mutex_lock(&dev->mode_config.fb_lock);
  1429. list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
  1430. if (ifbdev && &fb->base == ifbdev->helper.fb)
  1431. continue;
  1432. seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
  1433. fb->base.width,
  1434. fb->base.height,
  1435. fb->base.depth,
  1436. fb->base.bits_per_pixel,
  1437. atomic_read(&fb->base.refcount.refcount));
  1438. describe_obj(m, fb->obj);
  1439. seq_putc(m, '\n');
  1440. }
  1441. mutex_unlock(&dev->mode_config.fb_lock);
  1442. return 0;
  1443. }
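/*
 * i915_context_status (with its describe_ctx_ringbuf helper): list the ILK
 * power/render contexts and every context on dev_priv->context_list; in
 * execlists mode it additionally dumps the per-ring context object and
 * ringbuffer state for each context.
 */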
  1444. static void describe_ctx_ringbuf(struct seq_file *m,
  1445. struct intel_ringbuffer *ringbuf)
  1446. {
  1447. seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
  1448. ringbuf->space, ringbuf->head, ringbuf->tail,
  1449. ringbuf->last_retired_head);
  1450. }
  1451. static int i915_context_status(struct seq_file *m, void *unused)
  1452. {
  1453. struct drm_info_node *node = m->private;
  1454. struct drm_device *dev = node->minor->dev;
  1455. struct drm_i915_private *dev_priv = dev->dev_private;
  1456. struct intel_engine_cs *ring;
  1457. struct intel_context *ctx;
  1458. int ret, i;
  1459. ret = mutex_lock_interruptible(&dev->struct_mutex);
  1460. if (ret)
  1461. return ret;
  1462. if (dev_priv->ips.pwrctx) {
  1463. seq_puts(m, "power context ");
  1464. describe_obj(m, dev_priv->ips.pwrctx);
  1465. seq_putc(m, '\n');
  1466. }
  1467. if (dev_priv->ips.renderctx) {
  1468. seq_puts(m, "render context ");
  1469. describe_obj(m, dev_priv->ips.renderctx);
  1470. seq_putc(m, '\n');
  1471. }
  1472. list_for_each_entry(ctx, &dev_priv->context_list, link) {
  1473. if (!i915.enable_execlists &&
  1474. ctx->legacy_hw_ctx.rcs_state == NULL)
  1475. continue;
  1476. seq_puts(m, "HW context ");
  1477. describe_ctx(m, ctx);
  1478. for_each_ring(ring, dev_priv, i) {
  1479. if (ring->default_context == ctx)
  1480. seq_printf(m, "(default context %s) ",
  1481. ring->name);
  1482. }
  1483. if (i915.enable_execlists) {
  1484. seq_putc(m, '\n');
  1485. for_each_ring(ring, dev_priv, i) {
  1486. struct drm_i915_gem_object *ctx_obj =
  1487. ctx->engine[i].state;
  1488. struct intel_ringbuffer *ringbuf =
  1489. ctx->engine[i].ringbuf;
  1490. seq_printf(m, "%s: ", ring->name);
  1491. if (ctx_obj)
  1492. describe_obj(m, ctx_obj);
  1493. if (ringbuf)
  1494. describe_ctx_ringbuf(m, ringbuf);
  1495. seq_putc(m, '\n');
  1496. }
  1497. } else {
  1498. describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
  1499. }
  1500. seq_putc(m, '\n');
  1501. }
  1502. mutex_unlock(&dev->struct_mutex);
  1503. return 0;
  1504. }
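/*
 * i915_dump_lrc_obj: hexdump the execlists register state of one logical ring
 * context. The register state lives in page 1 of the context object (hence
 * the 4096 byte offset added to the GGTT address) and, given the loop bound
 * below, the first 96 dwords (0x180 bytes) are printed four per line.
 */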
  1505. static void i915_dump_lrc_obj(struct seq_file *m,
  1506. struct intel_engine_cs *ring,
  1507. struct drm_i915_gem_object *ctx_obj)
  1508. {
  1509. struct page *page;
  1510. uint32_t *reg_state;
  1511. int j;
  1512. unsigned long ggtt_offset = 0;
  1513. if (ctx_obj == NULL) {
  1514. seq_printf(m, "Context on %s with no gem object\n",
  1515. ring->name);
  1516. return;
  1517. }
  1518. seq_printf(m, "CONTEXT: %s %u\n", ring->name,
  1519. intel_execlists_ctx_id(ctx_obj));
  1520. if (!i915_gem_obj_ggtt_bound(ctx_obj))
  1521. seq_puts(m, "\tNot bound in GGTT\n");
  1522. else
  1523. ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
  1524. if (i915_gem_object_get_pages(ctx_obj)) {
  1525. seq_puts(m, "\tFailed to get pages for context object\n");
  1526. return;
  1527. }
  1528. page = i915_gem_object_get_page(ctx_obj, 1);
  1529. if (!WARN_ON(page == NULL)) {
  1530. reg_state = kmap_atomic(page);
  1531. for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
  1532. seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
  1533. ggtt_offset + 4096 + (j * 4),
  1534. reg_state[j], reg_state[j + 1],
  1535. reg_state[j + 2], reg_state[j + 3]);
  1536. }
  1537. kunmap_atomic(reg_state);
  1538. }
  1539. seq_putc(m, '\n');
  1540. }
  1541. static int i915_dump_lrc(struct seq_file *m, void *unused)
  1542. {
  1543. struct drm_info_node *node = (struct drm_info_node *) m->private;
  1544. struct drm_device *dev = node->minor->dev;
  1545. struct drm_i915_private *dev_priv = dev->dev_private;
  1546. struct intel_engine_cs *ring;
  1547. struct intel_context *ctx;
  1548. int ret, i;
  1549. if (!i915.enable_execlists) {
1550. seq_puts(m, "Logical Ring Contexts are disabled\n");
  1551. return 0;
  1552. }
  1553. ret = mutex_lock_interruptible(&dev->struct_mutex);
  1554. if (ret)
  1555. return ret;
  1556. list_for_each_entry(ctx, &dev_priv->context_list, link) {
  1557. for_each_ring(ring, dev_priv, i) {
  1558. if (ring->default_context != ctx)
  1559. i915_dump_lrc_obj(m, ring,
  1560. ctx->engine[i].state);
  1561. }
  1562. }
  1563. mutex_unlock(&dev->struct_mutex);
  1564. return 0;
  1565. }
  1566. static int i915_execlists(struct seq_file *m, void *data)
  1567. {
  1568. struct drm_info_node *node = (struct drm_info_node *)m->private;
  1569. struct drm_device *dev = node->minor->dev;
  1570. struct drm_i915_private *dev_priv = dev->dev_private;
  1571. struct intel_engine_cs *ring;
  1572. u32 status_pointer;
  1573. u8 read_pointer;
  1574. u8 write_pointer;
  1575. u32 status;
  1576. u32 ctx_id;
  1577. struct list_head *cursor;
  1578. int ring_id, i;
  1579. int ret;
  1580. if (!i915.enable_execlists) {
  1581. seq_puts(m, "Logical Ring Contexts are disabled\n");
  1582. return 0;
  1583. }
  1584. ret = mutex_lock_interruptible(&dev->struct_mutex);
  1585. if (ret)
  1586. return ret;
  1587. intel_runtime_pm_get(dev_priv);
  1588. for_each_ring(ring, dev_priv, ring_id) {
  1589. struct intel_ctx_submit_request *head_req = NULL;
  1590. int count = 0;
  1591. unsigned long flags;
  1592. seq_printf(m, "%s\n", ring->name);
  1593. status = I915_READ(RING_EXECLIST_STATUS(ring));
  1594. ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
  1595. seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
  1596. status, ctx_id);
  1597. status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
  1598. seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
  1599. read_pointer = ring->next_context_status_buffer;
  1600. write_pointer = status_pointer & 0x07;
  1601. if (read_pointer > write_pointer)
  1602. write_pointer += 6;
  1603. seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
  1604. read_pointer, write_pointer);
  1605. for (i = 0; i < 6; i++) {
  1606. status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
  1607. ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
  1608. seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
  1609. i, status, ctx_id);
  1610. }
  1611. spin_lock_irqsave(&ring->execlist_lock, flags);
  1612. list_for_each(cursor, &ring->execlist_queue)
  1613. count++;
  1614. head_req = list_first_entry_or_null(&ring->execlist_queue,
  1615. struct intel_ctx_submit_request, execlist_link);
  1616. spin_unlock_irqrestore(&ring->execlist_lock, flags);
  1617. seq_printf(m, "\t%d requests in queue\n", count);
  1618. if (head_req) {
  1619. struct drm_i915_gem_object *ctx_obj;
  1620. ctx_obj = head_req->ctx->engine[ring_id].state;
  1621. seq_printf(m, "\tHead request id: %u\n",
  1622. intel_execlists_ctx_id(ctx_obj));
  1623. seq_printf(m, "\tHead request tail: %u\n",
  1624. head_req->tail);
  1625. }
  1626. seq_putc(m, '\n');
  1627. }
  1628. intel_runtime_pm_put(dev_priv);
  1629. mutex_unlock(&dev->struct_mutex);
  1630. return 0;
  1631. }
  1632. static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
  1633. {
  1634. struct drm_info_node *node = m->private;
  1635. struct drm_device *dev = node->minor->dev;
  1636. struct drm_i915_private *dev_priv = dev->dev_private;
  1637. unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
  1638. spin_lock_irq(&dev_priv->uncore.lock);
  1639. if (IS_VALLEYVIEW(dev)) {
  1640. fw_rendercount = dev_priv->uncore.fw_rendercount;
  1641. fw_mediacount = dev_priv->uncore.fw_mediacount;
  1642. } else
  1643. forcewake_count = dev_priv->uncore.forcewake_count;
  1644. spin_unlock_irq(&dev_priv->uncore.lock);
  1645. if (IS_VALLEYVIEW(dev)) {
  1646. seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
  1647. seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
  1648. } else
  1649. seq_printf(m, "forcewake count = %u\n", forcewake_count);
  1650. return 0;
  1651. }
  1652. static const char *swizzle_string(unsigned swizzle)
  1653. {
  1654. switch (swizzle) {
  1655. case I915_BIT_6_SWIZZLE_NONE:
  1656. return "none";
  1657. case I915_BIT_6_SWIZZLE_9:
  1658. return "bit9";
  1659. case I915_BIT_6_SWIZZLE_9_10:
  1660. return "bit9/bit10";
  1661. case I915_BIT_6_SWIZZLE_9_11:
  1662. return "bit9/bit11";
  1663. case I915_BIT_6_SWIZZLE_9_10_11:
  1664. return "bit9/bit10/bit11";
  1665. case I915_BIT_6_SWIZZLE_9_17:
  1666. return "bit9/bit17";
  1667. case I915_BIT_6_SWIZZLE_9_10_17:
  1668. return "bit9/bit10/bit17";
  1669. case I915_BIT_6_SWIZZLE_UNKNOWN:
  1670. return "unknown";
  1671. }
  1672. return "bug";
  1673. }
  1674. static int i915_swizzle_info(struct seq_file *m, void *data)
  1675. {
  1676. struct drm_info_node *node = m->private;
  1677. struct drm_device *dev = node->minor->dev;
  1678. struct drm_i915_private *dev_priv = dev->dev_private;
  1679. int ret;
  1680. ret = mutex_lock_interruptible(&dev->struct_mutex);
  1681. if (ret)
  1682. return ret;
  1683. intel_runtime_pm_get(dev_priv);
  1684. seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
  1685. swizzle_string(dev_priv->mm.bit_6_swizzle_x));
  1686. seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
  1687. swizzle_string(dev_priv->mm.bit_6_swizzle_y));
  1688. if (IS_GEN3(dev) || IS_GEN4(dev)) {
  1689. seq_printf(m, "DDC = 0x%08x\n",
  1690. I915_READ(DCC));
  1691. seq_printf(m, "DDC2 = 0x%08x\n",
  1692. I915_READ(DCC2));
  1693. seq_printf(m, "C0DRB3 = 0x%04x\n",
  1694. I915_READ16(C0DRB3));
  1695. seq_printf(m, "C1DRB3 = 0x%04x\n",
  1696. I915_READ16(C1DRB3));
  1697. } else if (INTEL_INFO(dev)->gen >= 6) {
  1698. seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
  1699. I915_READ(MAD_DIMM_C0));
  1700. seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
  1701. I915_READ(MAD_DIMM_C1));
  1702. seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
  1703. I915_READ(MAD_DIMM_C2));
  1704. seq_printf(m, "TILECTL = 0x%08x\n",
  1705. I915_READ(TILECTL));
  1706. if (INTEL_INFO(dev)->gen >= 8)
  1707. seq_printf(m, "GAMTARBMODE = 0x%08x\n",
  1708. I915_READ(GAMTARBMODE));
  1709. else
  1710. seq_printf(m, "ARB_MODE = 0x%08x\n",
  1711. I915_READ(ARB_MODE));
  1712. seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
  1713. I915_READ(DISP_ARB_CTL));
  1714. }
  1715. if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
  1716. seq_puts(m, "L-shaped memory detected\n");
  1717. intel_runtime_pm_put(dev_priv);
  1718. mutex_unlock(&dev->struct_mutex);
  1719. return 0;
  1720. }
  1721. static int per_file_ctx(int id, void *ptr, void *data)
  1722. {
  1723. struct intel_context *ctx = ptr;
  1724. struct seq_file *m = data;
  1725. struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
  1726. if (!ppgtt) {
  1727. seq_printf(m, " no ppgtt for context %d\n",
  1728. ctx->user_handle);
  1729. return 0;
  1730. }
  1731. if (i915_gem_context_is_default(ctx))
  1732. seq_puts(m, " default context:\n");
  1733. else
  1734. seq_printf(m, " context %d:\n", ctx->user_handle);
  1735. ppgtt->debug_dump(ppgtt, m);
  1736. return 0;
  1737. }
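/*
 * gen8_ppgtt_info: for the aliasing PPGTT, read back the four page-directory
 * pointers each ring has loaded. Each PDPn is a 64-bit value split across two
 * 32-bit registers at ring->mmio_base + 0x270 + n*8 (low dword) and +4 (high
 * dword), which is why the high read is shifted up by 32 before being OR-ed
 * with the low read.
 */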
  1738. static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
  1739. {
  1740. struct drm_i915_private *dev_priv = dev->dev_private;
  1741. struct intel_engine_cs *ring;
  1742. struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
  1743. int unused, i;
  1744. if (!ppgtt)
  1745. return;
  1746. seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
  1747. seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
  1748. for_each_ring(ring, dev_priv, unused) {
  1749. seq_printf(m, "%s\n", ring->name);
  1750. for (i = 0; i < 4; i++) {
  1751. u32 offset = 0x270 + i * 8;
  1752. u64 pdp = I915_READ(ring->mmio_base + offset + 4);
  1753. pdp <<= 32;
  1754. pdp |= I915_READ(ring->mmio_base + offset);
  1755. seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
  1756. }
  1757. }
  1758. }
  1759. static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
  1760. {
  1761. struct drm_i915_private *dev_priv = dev->dev_private;
  1762. struct intel_engine_cs *ring;
  1763. struct drm_file *file;
  1764. int i;
  1765. if (INTEL_INFO(dev)->gen == 6)
  1766. seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
  1767. for_each_ring(ring, dev_priv, i) {
  1768. seq_printf(m, "%s\n", ring->name);
  1769. if (INTEL_INFO(dev)->gen == 7)
  1770. seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
  1771. seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
  1772. seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
  1773. seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
  1774. }
  1775. if (dev_priv->mm.aliasing_ppgtt) {
  1776. struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
  1777. seq_puts(m, "aliasing PPGTT:\n");
  1778. seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
  1779. ppgtt->debug_dump(ppgtt, m);
  1780. }
  1781. list_for_each_entry_reverse(file, &dev->filelist, lhead) {
  1782. struct drm_i915_file_private *file_priv = file->driver_priv;
  1783. seq_printf(m, "proc: %s\n",
  1784. get_pid_task(file->pid, PIDTYPE_PID)->comm);
  1785. idr_for_each(&file_priv->context_idr, per_file_ctx, m);
  1786. }
  1787. seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
  1788. }
  1789. static int i915_ppgtt_info(struct seq_file *m, void *data)
  1790. {
  1791. struct drm_info_node *node = m->private;
  1792. struct drm_device *dev = node->minor->dev;
  1793. struct drm_i915_private *dev_priv = dev->dev_private;
  1794. int ret = mutex_lock_interruptible(&dev->struct_mutex);
  1795. if (ret)
  1796. return ret;
  1797. intel_runtime_pm_get(dev_priv);
  1798. if (INTEL_INFO(dev)->gen >= 8)
  1799. gen8_ppgtt_info(m, dev);
  1800. else if (INTEL_INFO(dev)->gen >= 6)
  1801. gen6_ppgtt_info(m, dev);
  1802. intel_runtime_pm_put(dev_priv);
  1803. mutex_unlock(&dev->struct_mutex);
  1804. return 0;
  1805. }
  1806. static int i915_llc(struct seq_file *m, void *data)
  1807. {
  1808. struct drm_info_node *node = m->private;
  1809. struct drm_device *dev = node->minor->dev;
  1810. struct drm_i915_private *dev_priv = dev->dev_private;
  1811. /* Size calculation for LLC is a bit of a pain. Ignore for now. */
  1812. seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
  1813. seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
  1814. return 0;
  1815. }
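/*
 * i915_edp_psr_status: report eDP Panel Self Refresh state -- sink/source
 * capability, the software enabled/active flags, busy frontbuffer bits, and
 * the hardware enable bit (EDP_PSR_CTL on DDI platforms, per-pipe VLV_PSRSTAT
 * otherwise), plus the performance counter where one exists.
 */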
  1816. static int i915_edp_psr_status(struct seq_file *m, void *data)
  1817. {
  1818. struct drm_info_node *node = m->private;
  1819. struct drm_device *dev = node->minor->dev;
  1820. struct drm_i915_private *dev_priv = dev->dev_private;
  1821. u32 psrperf = 0;
  1822. u32 stat[3];
  1823. enum pipe pipe;
  1824. bool enabled = false;
  1825. intel_runtime_pm_get(dev_priv);
  1826. mutex_lock(&dev_priv->psr.lock);
  1827. seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
  1828. seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
  1829. seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
  1830. seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
  1831. seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
  1832. dev_priv->psr.busy_frontbuffer_bits);
  1833. seq_printf(m, "Re-enable work scheduled: %s\n",
  1834. yesno(work_busy(&dev_priv->psr.work.work)));
  1835. if (HAS_PSR(dev)) {
  1836. if (HAS_DDI(dev))
  1837. enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
  1838. else {
  1839. for_each_pipe(dev_priv, pipe) {
  1840. stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
  1841. VLV_EDP_PSR_CURR_STATE_MASK;
  1842. if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
  1843. (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
  1844. enabled = true;
  1845. }
  1846. }
  1847. }
  1848. seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
  1849. if (!HAS_DDI(dev))
  1850. for_each_pipe(dev_priv, pipe) {
  1851. if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
  1852. (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
  1853. seq_printf(m, " pipe %c", pipe_name(pipe));
  1854. }
  1855. seq_puts(m, "\n");
  1856. /* CHV PSR has no kind of performance counter */
  1857. if (HAS_PSR(dev) && HAS_DDI(dev)) {
  1858. psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
  1859. EDP_PSR_PERF_CNT_MASK;
  1860. seq_printf(m, "Performance_Counter: %u\n", psrperf);
  1861. }
  1862. mutex_unlock(&dev_priv->psr.lock);
  1863. intel_runtime_pm_put(dev_priv);
  1864. return 0;
  1865. }
  1866. static int i915_sink_crc(struct seq_file *m, void *data)
  1867. {
  1868. struct drm_info_node *node = m->private;
  1869. struct drm_device *dev = node->minor->dev;
  1870. struct intel_encoder *encoder;
  1871. struct intel_connector *connector;
  1872. struct intel_dp *intel_dp = NULL;
  1873. int ret;
  1874. u8 crc[6];
  1875. drm_modeset_lock_all(dev);
  1876. list_for_each_entry(connector, &dev->mode_config.connector_list,
  1877. base.head) {
  1878. if (connector->base.dpms != DRM_MODE_DPMS_ON)
  1879. continue;
  1880. if (!connector->base.encoder)
  1881. continue;
  1882. encoder = to_intel_encoder(connector->base.encoder);
  1883. if (encoder->type != INTEL_OUTPUT_EDP)
  1884. continue;
  1885. intel_dp = enc_to_intel_dp(&encoder->base);
  1886. ret = intel_dp_sink_crc(intel_dp, crc);
  1887. if (ret)
  1888. goto out;
  1889. seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
  1890. crc[0], crc[1], crc[2],
  1891. crc[3], crc[4], crc[5]);
  1892. goto out;
  1893. }
  1894. ret = -ENODEV;
  1895. out:
  1896. drm_modeset_unlock_all(dev);
  1897. return ret;
  1898. }
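/*
 * i915_energy_uJ: convert the MCH energy-status counter to microjoules.
 * Bits 12:8 of MSR_RAPL_POWER_UNIT give the energy-status unit as a power of
 * two, so one counter tick is 1000000 / 2^ESU uJ. Illustrative example (not a
 * measured value): ESU == 14 makes each tick worth 61 uJ after the integer
 * division below.
 */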
  1899. static int i915_energy_uJ(struct seq_file *m, void *data)
  1900. {
  1901. struct drm_info_node *node = m->private;
  1902. struct drm_device *dev = node->minor->dev;
  1903. struct drm_i915_private *dev_priv = dev->dev_private;
  1904. u64 power;
  1905. u32 units;
  1906. if (INTEL_INFO(dev)->gen < 6)
  1907. return -ENODEV;
  1908. intel_runtime_pm_get(dev_priv);
  1909. rdmsrl(MSR_RAPL_POWER_UNIT, power);
  1910. power = (power & 0x1f00) >> 8;
  1911. units = 1000000 / (1 << power); /* convert to uJ */
  1912. power = I915_READ(MCH_SECP_NRG_STTS);
  1913. power *= units;
  1914. intel_runtime_pm_put(dev_priv);
  1915. seq_printf(m, "%llu", (long long unsigned)power);
  1916. return 0;
  1917. }
  1918. static int i915_pc8_status(struct seq_file *m, void *unused)
  1919. {
  1920. struct drm_info_node *node = m->private;
  1921. struct drm_device *dev = node->minor->dev;
  1922. struct drm_i915_private *dev_priv = dev->dev_private;
  1923. if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
  1924. seq_puts(m, "not supported\n");
  1925. return 0;
  1926. }
  1927. seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
  1928. seq_printf(m, "IRQs disabled: %s\n",
  1929. yesno(!intel_irqs_enabled(dev_priv)));
  1930. return 0;
  1931. }
  1932. static const char *power_domain_str(enum intel_display_power_domain domain)
  1933. {
  1934. switch (domain) {
  1935. case POWER_DOMAIN_PIPE_A:
  1936. return "PIPE_A";
  1937. case POWER_DOMAIN_PIPE_B:
  1938. return "PIPE_B";
  1939. case POWER_DOMAIN_PIPE_C:
  1940. return "PIPE_C";
  1941. case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
  1942. return "PIPE_A_PANEL_FITTER";
  1943. case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
  1944. return "PIPE_B_PANEL_FITTER";
  1945. case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
  1946. return "PIPE_C_PANEL_FITTER";
  1947. case POWER_DOMAIN_TRANSCODER_A:
  1948. return "TRANSCODER_A";
  1949. case POWER_DOMAIN_TRANSCODER_B:
  1950. return "TRANSCODER_B";
  1951. case POWER_DOMAIN_TRANSCODER_C:
  1952. return "TRANSCODER_C";
  1953. case POWER_DOMAIN_TRANSCODER_EDP:
  1954. return "TRANSCODER_EDP";
  1955. case POWER_DOMAIN_PORT_DDI_A_2_LANES:
  1956. return "PORT_DDI_A_2_LANES";
  1957. case POWER_DOMAIN_PORT_DDI_A_4_LANES:
  1958. return "PORT_DDI_A_4_LANES";
  1959. case POWER_DOMAIN_PORT_DDI_B_2_LANES:
  1960. return "PORT_DDI_B_2_LANES";
  1961. case POWER_DOMAIN_PORT_DDI_B_4_LANES:
  1962. return "PORT_DDI_B_4_LANES";
  1963. case POWER_DOMAIN_PORT_DDI_C_2_LANES:
  1964. return "PORT_DDI_C_2_LANES";
  1965. case POWER_DOMAIN_PORT_DDI_C_4_LANES:
  1966. return "PORT_DDI_C_4_LANES";
  1967. case POWER_DOMAIN_PORT_DDI_D_2_LANES:
  1968. return "PORT_DDI_D_2_LANES";
  1969. case POWER_DOMAIN_PORT_DDI_D_4_LANES:
  1970. return "PORT_DDI_D_4_LANES";
  1971. case POWER_DOMAIN_PORT_DSI:
  1972. return "PORT_DSI";
  1973. case POWER_DOMAIN_PORT_CRT:
  1974. return "PORT_CRT";
  1975. case POWER_DOMAIN_PORT_OTHER:
  1976. return "PORT_OTHER";
  1977. case POWER_DOMAIN_VGA:
  1978. return "VGA";
  1979. case POWER_DOMAIN_AUDIO:
  1980. return "AUDIO";
  1981. case POWER_DOMAIN_PLLS:
  1982. return "PLLS";
  1983. case POWER_DOMAIN_INIT:
  1984. return "INIT";
  1985. default:
  1986. WARN_ON(1);
  1987. return "?";
  1988. }
  1989. }
  1990. static int i915_power_domain_info(struct seq_file *m, void *unused)
  1991. {
  1992. struct drm_info_node *node = m->private;
  1993. struct drm_device *dev = node->minor->dev;
  1994. struct drm_i915_private *dev_priv = dev->dev_private;
  1995. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  1996. int i;
  1997. mutex_lock(&power_domains->lock);
  1998. seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
  1999. for (i = 0; i < power_domains->power_well_count; i++) {
  2000. struct i915_power_well *power_well;
  2001. enum intel_display_power_domain power_domain;
  2002. power_well = &power_domains->power_wells[i];
  2003. seq_printf(m, "%-25s %d\n", power_well->name,
  2004. power_well->count);
  2005. for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
  2006. power_domain++) {
  2007. if (!(BIT(power_domain) & power_well->domains))
  2008. continue;
  2009. seq_printf(m, " %-23s %d\n",
  2010. power_domain_str(power_domain),
  2011. power_domains->domain_use_count[power_domain]);
  2012. }
  2013. }
  2014. mutex_unlock(&power_domains->lock);
  2015. return 0;
  2016. }
  2017. static void intel_seq_print_mode(struct seq_file *m, int tabs,
  2018. struct drm_display_mode *mode)
  2019. {
  2020. int i;
  2021. for (i = 0; i < tabs; i++)
  2022. seq_putc(m, '\t');
  2023. seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
  2024. mode->base.id, mode->name,
  2025. mode->vrefresh, mode->clock,
  2026. mode->hdisplay, mode->hsync_start,
  2027. mode->hsync_end, mode->htotal,
  2028. mode->vdisplay, mode->vsync_start,
  2029. mode->vsync_end, mode->vtotal,
  2030. mode->type, mode->flags);
  2031. }
  2032. static void intel_encoder_info(struct seq_file *m,
  2033. struct intel_crtc *intel_crtc,
  2034. struct intel_encoder *intel_encoder)
  2035. {
  2036. struct drm_info_node *node = m->private;
  2037. struct drm_device *dev = node->minor->dev;
  2038. struct drm_crtc *crtc = &intel_crtc->base;
  2039. struct intel_connector *intel_connector;
  2040. struct drm_encoder *encoder;
  2041. encoder = &intel_encoder->base;
  2042. seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
  2043. encoder->base.id, encoder->name);
  2044. for_each_connector_on_encoder(dev, encoder, intel_connector) {
  2045. struct drm_connector *connector = &intel_connector->base;
  2046. seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
  2047. connector->base.id,
  2048. connector->name,
  2049. drm_get_connector_status_name(connector->status));
  2050. if (connector->status == connector_status_connected) {
  2051. struct drm_display_mode *mode = &crtc->mode;
  2052. seq_printf(m, ", mode:\n");
  2053. intel_seq_print_mode(m, 2, mode);
  2054. } else {
  2055. seq_putc(m, '\n');
  2056. }
  2057. }
  2058. }
  2059. static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
  2060. {
  2061. struct drm_info_node *node = m->private;
  2062. struct drm_device *dev = node->minor->dev;
  2063. struct drm_crtc *crtc = &intel_crtc->base;
  2064. struct intel_encoder *intel_encoder;
  2065. if (crtc->primary->fb)
  2066. seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
  2067. crtc->primary->fb->base.id, crtc->x, crtc->y,
  2068. crtc->primary->fb->width, crtc->primary->fb->height);
  2069. else
  2070. seq_puts(m, "\tprimary plane disabled\n");
  2071. for_each_encoder_on_crtc(dev, crtc, intel_encoder)
  2072. intel_encoder_info(m, intel_crtc, intel_encoder);
  2073. }
  2074. static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
  2075. {
  2076. struct drm_display_mode *mode = panel->fixed_mode;
  2077. seq_printf(m, "\tfixed mode:\n");
  2078. intel_seq_print_mode(m, 2, mode);
  2079. }
  2080. static void intel_dp_info(struct seq_file *m,
  2081. struct intel_connector *intel_connector)
  2082. {
  2083. struct intel_encoder *intel_encoder = intel_connector->encoder;
  2084. struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
  2085. seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
  2086. seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
  2087. "no");
  2088. if (intel_encoder->type == INTEL_OUTPUT_EDP)
  2089. intel_panel_info(m, &intel_connector->panel);
  2090. }
  2091. static void intel_hdmi_info(struct seq_file *m,
  2092. struct intel_connector *intel_connector)
  2093. {
  2094. struct intel_encoder *intel_encoder = intel_connector->encoder;
  2095. struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
  2096. seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
  2097. "no");
  2098. }
  2099. static void intel_lvds_info(struct seq_file *m,
  2100. struct intel_connector *intel_connector)
  2101. {
  2102. intel_panel_info(m, &intel_connector->panel);
  2103. }
  2104. static void intel_connector_info(struct seq_file *m,
  2105. struct drm_connector *connector)
  2106. {
  2107. struct intel_connector *intel_connector = to_intel_connector(connector);
  2108. struct intel_encoder *intel_encoder = intel_connector->encoder;
  2109. struct drm_display_mode *mode;
  2110. seq_printf(m, "connector %d: type %s, status: %s\n",
  2111. connector->base.id, connector->name,
  2112. drm_get_connector_status_name(connector->status));
  2113. if (connector->status == connector_status_connected) {
  2114. seq_printf(m, "\tname: %s\n", connector->display_info.name);
  2115. seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
  2116. connector->display_info.width_mm,
  2117. connector->display_info.height_mm);
  2118. seq_printf(m, "\tsubpixel order: %s\n",
  2119. drm_get_subpixel_order_name(connector->display_info.subpixel_order));
  2120. seq_printf(m, "\tCEA rev: %d\n",
  2121. connector->display_info.cea_rev);
  2122. }
  2123. if (intel_encoder) {
  2124. if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
  2125. intel_encoder->type == INTEL_OUTPUT_EDP)
  2126. intel_dp_info(m, intel_connector);
  2127. else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
  2128. intel_hdmi_info(m, intel_connector);
  2129. else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
  2130. intel_lvds_info(m, intel_connector);
  2131. }
  2132. seq_printf(m, "\tmodes:\n");
  2133. list_for_each_entry(mode, &connector->modes, head)
  2134. intel_seq_print_mode(m, 2, mode);
  2135. }
  2136. static bool cursor_active(struct drm_device *dev, int pipe)
  2137. {
  2138. struct drm_i915_private *dev_priv = dev->dev_private;
  2139. u32 state;
  2140. if (IS_845G(dev) || IS_I865G(dev))
  2141. state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
  2142. else
  2143. state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
  2144. return state;
  2145. }
  2146. static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
  2147. {
  2148. struct drm_i915_private *dev_priv = dev->dev_private;
  2149. u32 pos;
  2150. pos = I915_READ(CURPOS(pipe));
  2151. *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
  2152. if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
  2153. *x = -*x;
  2154. *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
  2155. if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
  2156. *y = -*y;
  2157. return cursor_active(dev, pipe);
  2158. }
  2159. static int i915_display_info(struct seq_file *m, void *unused)
  2160. {
  2161. struct drm_info_node *node = m->private;
  2162. struct drm_device *dev = node->minor->dev;
  2163. struct drm_i915_private *dev_priv = dev->dev_private;
  2164. struct intel_crtc *crtc;
  2165. struct drm_connector *connector;
  2166. intel_runtime_pm_get(dev_priv);
  2167. drm_modeset_lock_all(dev);
  2168. seq_printf(m, "CRTC info\n");
  2169. seq_printf(m, "---------\n");
  2170. for_each_intel_crtc(dev, crtc) {
  2171. bool active;
  2172. int x, y;
  2173. seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
  2174. crtc->base.base.id, pipe_name(crtc->pipe),
  2175. yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
  2176. if (crtc->active) {
  2177. intel_crtc_info(m, crtc);
  2178. active = cursor_position(dev, crtc->pipe, &x, &y);
  2179. seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
  2180. yesno(crtc->cursor_base),
  2181. x, y, crtc->cursor_width, crtc->cursor_height,
  2182. crtc->cursor_addr, yesno(active));
  2183. }
  2184. seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
  2185. yesno(!crtc->cpu_fifo_underrun_disabled),
  2186. yesno(!crtc->pch_fifo_underrun_disabled));
  2187. }
  2188. seq_printf(m, "\n");
  2189. seq_printf(m, "Connector info\n");
  2190. seq_printf(m, "--------------\n");
  2191. list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
  2192. intel_connector_info(m, connector);
  2193. }
  2194. drm_modeset_unlock_all(dev);
  2195. intel_runtime_pm_put(dev_priv);
  2196. return 0;
  2197. }
  2198. static int i915_semaphore_status(struct seq_file *m, void *unused)
  2199. {
  2200. struct drm_info_node *node = (struct drm_info_node *) m->private;
  2201. struct drm_device *dev = node->minor->dev;
  2202. struct drm_i915_private *dev_priv = dev->dev_private;
  2203. struct intel_engine_cs *ring;
  2204. int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
  2205. int i, j, ret;
  2206. if (!i915_semaphore_is_enabled(dev)) {
  2207. seq_puts(m, "Semaphores are disabled\n");
  2208. return 0;
  2209. }
  2210. ret = mutex_lock_interruptible(&dev->struct_mutex);
  2211. if (ret)
  2212. return ret;
  2213. intel_runtime_pm_get(dev_priv);
  2214. if (IS_BROADWELL(dev)) {
  2215. struct page *page;
  2216. uint64_t *seqno;
  2217. page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
  2218. seqno = (uint64_t *)kmap_atomic(page);
  2219. for_each_ring(ring, dev_priv, i) {
  2220. uint64_t offset;
  2221. seq_printf(m, "%s\n", ring->name);
  2222. seq_puts(m, " Last signal:");
  2223. for (j = 0; j < num_rings; j++) {
  2224. offset = i * I915_NUM_RINGS + j;
  2225. seq_printf(m, "0x%08llx (0x%02llx) ",
  2226. seqno[offset], offset * 8);
  2227. }
  2228. seq_putc(m, '\n');
  2229. seq_puts(m, " Last wait: ");
  2230. for (j = 0; j < num_rings; j++) {
  2231. offset = i + (j * I915_NUM_RINGS);
  2232. seq_printf(m, "0x%08llx (0x%02llx) ",
  2233. seqno[offset], offset * 8);
  2234. }
  2235. seq_putc(m, '\n');
  2236. }
  2237. kunmap_atomic(seqno);
  2238. } else {
  2239. seq_puts(m, " Last signal:");
  2240. for_each_ring(ring, dev_priv, i)
  2241. for (j = 0; j < num_rings; j++)
  2242. seq_printf(m, "0x%08x\n",
  2243. I915_READ(ring->semaphore.mbox.signal[j]));
  2244. seq_putc(m, '\n');
  2245. }
  2246. seq_puts(m, "\nSync seqno:\n");
  2247. for_each_ring(ring, dev_priv, i) {
  2248. for (j = 0; j < num_rings; j++) {
  2249. seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
  2250. }
  2251. seq_putc(m, '\n');
  2252. }
  2253. seq_putc(m, '\n');
  2254. intel_runtime_pm_put(dev_priv);
  2255. mutex_unlock(&dev->struct_mutex);
  2256. return 0;
  2257. }
  2258. static int i915_shared_dplls_info(struct seq_file *m, void *unused)
  2259. {
  2260. struct drm_info_node *node = (struct drm_info_node *) m->private;
  2261. struct drm_device *dev = node->minor->dev;
  2262. struct drm_i915_private *dev_priv = dev->dev_private;
  2263. int i;
  2264. drm_modeset_lock_all(dev);
  2265. for (i = 0; i < dev_priv->num_shared_dpll; i++) {
  2266. struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
  2267. seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
  2268. seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
  2269. pll->config.crtc_mask, pll->active, yesno(pll->on));
  2270. seq_printf(m, " tracked hardware state:\n");
  2271. seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
  2272. seq_printf(m, " dpll_md: 0x%08x\n",
  2273. pll->config.hw_state.dpll_md);
  2274. seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
  2275. seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
  2276. seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
  2277. }
  2278. drm_modeset_unlock_all(dev);
  2279. return 0;
  2280. }
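/*
 * i915_wa_registers: verify that every workaround register recorded in
 * dev_priv->workarounds still holds its programmed value. A row reports "OK"
 * when (value & mask) == (readback & mask), i.e. only the masked bits are
 * compared against the current hardware state.
 */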
  2281. static int i915_wa_registers(struct seq_file *m, void *unused)
  2282. {
  2283. int i;
  2284. int ret;
  2285. struct drm_info_node *node = (struct drm_info_node *) m->private;
  2286. struct drm_device *dev = node->minor->dev;
  2287. struct drm_i915_private *dev_priv = dev->dev_private;
  2288. ret = mutex_lock_interruptible(&dev->struct_mutex);
  2289. if (ret)
  2290. return ret;
  2291. intel_runtime_pm_get(dev_priv);
  2292. seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
  2293. for (i = 0; i < dev_priv->workarounds.count; ++i) {
  2294. u32 addr, mask, value, read;
  2295. bool ok;
  2296. addr = dev_priv->workarounds.reg[i].addr;
  2297. mask = dev_priv->workarounds.reg[i].mask;
  2298. value = dev_priv->workarounds.reg[i].value;
  2299. read = I915_READ(addr);
  2300. ok = (value & mask) == (read & mask);
  2301. seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
  2302. addr, value, mask, read, ok ? "OK" : "FAIL");
  2303. }
  2304. intel_runtime_pm_put(dev_priv);
  2305. mutex_unlock(&dev->struct_mutex);
  2306. return 0;
  2307. }
  2308. static int i915_ddb_info(struct seq_file *m, void *unused)
  2309. {
  2310. struct drm_info_node *node = m->private;
  2311. struct drm_device *dev = node->minor->dev;
  2312. struct drm_i915_private *dev_priv = dev->dev_private;
  2313. struct skl_ddb_allocation *ddb;
  2314. struct skl_ddb_entry *entry;
  2315. enum pipe pipe;
  2316. int plane;
  2317. drm_modeset_lock_all(dev);
  2318. ddb = &dev_priv->wm.skl_hw.ddb;
  2319. seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
  2320. for_each_pipe(dev_priv, pipe) {
  2321. seq_printf(m, "Pipe %c\n", pipe_name(pipe));
  2322. for_each_plane(pipe, plane) {
  2323. entry = &ddb->plane[pipe][plane];
  2324. seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
  2325. entry->start, entry->end,
  2326. skl_ddb_entry_size(entry));
  2327. }
  2328. entry = &ddb->cursor[pipe];
  2329. seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
  2330. entry->end, skl_ddb_entry_size(entry));
  2331. }
  2332. drm_modeset_unlock_all(dev);
  2333. return 0;
  2334. }
  2335. struct pipe_crc_info {
  2336. const char *name;
  2337. struct drm_device *dev;
  2338. enum pipe pipe;
  2339. };
  2340. static int i915_dp_mst_info(struct seq_file *m, void *unused)
  2341. {
  2342. struct drm_info_node *node = (struct drm_info_node *) m->private;
  2343. struct drm_device *dev = node->minor->dev;
  2344. struct drm_encoder *encoder;
  2345. struct intel_encoder *intel_encoder;
  2346. struct intel_digital_port *intel_dig_port;
  2347. drm_modeset_lock_all(dev);
  2348. list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
  2349. intel_encoder = to_intel_encoder(encoder);
  2350. if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
  2351. continue;
  2352. intel_dig_port = enc_to_dig_port(encoder);
  2353. if (!intel_dig_port->dp.can_mst)
  2354. continue;
  2355. drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
  2356. }
  2357. drm_modeset_unlock_all(dev);
  2358. return 0;
  2359. }
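/*
 * Pipe CRC debugfs plumbing: one i915_pipe_<X>_crc file per pipe, opened
 * exclusively (a second open returns -EBUSY) and read as text lines drained
 * from the per-pipe circular CRC buffer that the rest of the driver fills.
 */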
  2360. static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
  2361. {
  2362. struct pipe_crc_info *info = inode->i_private;
  2363. struct drm_i915_private *dev_priv = info->dev->dev_private;
  2364. struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
  2365. if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
  2366. return -ENODEV;
  2367. spin_lock_irq(&pipe_crc->lock);
  2368. if (pipe_crc->opened) {
  2369. spin_unlock_irq(&pipe_crc->lock);
  2370. return -EBUSY; /* already open */
  2371. }
  2372. pipe_crc->opened = true;
  2373. filep->private_data = inode->i_private;
  2374. spin_unlock_irq(&pipe_crc->lock);
  2375. return 0;
  2376. }
  2377. static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
  2378. {
  2379. struct pipe_crc_info *info = inode->i_private;
  2380. struct drm_i915_private *dev_priv = info->dev->dev_private;
  2381. struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
  2382. spin_lock_irq(&pipe_crc->lock);
  2383. pipe_crc->opened = false;
  2384. spin_unlock_irq(&pipe_crc->lock);
  2385. return 0;
  2386. }
  2387. /* (6 fields, 8 chars each, space separated (5) + '\n') */
  2388. #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
2389. /* account for the trailing '\0' */
  2390. #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
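/*
 * Line length check: 6 fields * 8 chars + 5 separating spaces + '\n' gives
 * PIPE_CRC_LINE_LEN == 54, and PIPE_CRC_BUFFER_LEN adds one byte for the
 * snprintf NUL terminator, so each read copies exactly one 54-byte line per
 * CRC entry.
 */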
  2391. static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
  2392. {
  2393. assert_spin_locked(&pipe_crc->lock);
  2394. return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
  2395. INTEL_PIPE_CRC_ENTRIES_NR);
  2396. }
  2397. static ssize_t
  2398. i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
  2399. loff_t *pos)
  2400. {
  2401. struct pipe_crc_info *info = filep->private_data;
  2402. struct drm_device *dev = info->dev;
  2403. struct drm_i915_private *dev_priv = dev->dev_private;
  2404. struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
  2405. char buf[PIPE_CRC_BUFFER_LEN];
  2406. int head, tail, n_entries, n;
  2407. ssize_t bytes_read;
  2408. /*
  2409. * Don't allow user space to provide buffers not big enough to hold
  2410. * a line of data.
  2411. */
  2412. if (count < PIPE_CRC_LINE_LEN)
  2413. return -EINVAL;
  2414. if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
  2415. return 0;
  2416. /* nothing to read */
  2417. spin_lock_irq(&pipe_crc->lock);
  2418. while (pipe_crc_data_count(pipe_crc) == 0) {
  2419. int ret;
  2420. if (filep->f_flags & O_NONBLOCK) {
  2421. spin_unlock_irq(&pipe_crc->lock);
  2422. return -EAGAIN;
  2423. }
  2424. ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
  2425. pipe_crc_data_count(pipe_crc), pipe_crc->lock);
  2426. if (ret) {
  2427. spin_unlock_irq(&pipe_crc->lock);
  2428. return ret;
  2429. }
  2430. }
  2431. /* We now have one or more entries to read */
  2432. head = pipe_crc->head;
  2433. tail = pipe_crc->tail;
  2434. n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
  2435. count / PIPE_CRC_LINE_LEN);
  2436. spin_unlock_irq(&pipe_crc->lock);
  2437. bytes_read = 0;
  2438. n = 0;
  2439. do {
  2440. struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
  2441. int ret;
  2442. bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
  2443. "%8u %8x %8x %8x %8x %8x\n",
  2444. entry->frame, entry->crc[0],
  2445. entry->crc[1], entry->crc[2],
  2446. entry->crc[3], entry->crc[4]);
  2447. ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
  2448. buf, PIPE_CRC_LINE_LEN);
  2449. if (ret == PIPE_CRC_LINE_LEN)
  2450. return -EFAULT;
  2451. BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
  2452. tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
  2453. n++;
  2454. } while (--n_entries);
  2455. spin_lock_irq(&pipe_crc->lock);
  2456. pipe_crc->tail = tail;
  2457. spin_unlock_irq(&pipe_crc->lock);
  2458. return bytes_read;
  2459. }

static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
	.read = i915_pipe_crc_read,
	.release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
	{
		.name = "i915_pipe_A_crc",
		.pipe = PIPE_A,
	},
	{
		.name = "i915_pipe_B_crc",
		.pipe = PIPE_B,
	},
	{
		.name = "i915_pipe_C_crc",
		.pipe = PIPE_C,
	},
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
				enum pipe pipe)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;
	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

	info->dev = dev;
	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
				  &i915_pipe_crc_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, info);
}

static const char * const pipe_crc_sources[] = {
	"none",
	"plane1",
	"plane2",
	"pf",
	"pipe",
	"TV",
	"DP-B",
	"DP-C",
	"DP-D",
	"auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);

	return pipe_crc_sources[source];
}

static int display_crc_ctl_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PIPES; i++)
		seq_printf(m, "%c %s\n", pipe_name(i),
			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

	return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, display_crc_ctl_show, dev);
}

static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
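
/*
 * The *_pipe_crc_ctl_reg() helpers below share one contract: *source is an
 * in/out parameter (INTEL_PIPE_CRC_SOURCE_AUTO is resolved to a concrete
 * source for the platform), *val receives the value to write into
 * PIPE_CRC_CTL, and -EINVAL is returned for sources the platform cannot
 * provide.
 */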

static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
				     enum intel_pipe_crc_source *source)
{
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	struct intel_digital_port *dig_port;
	int ret = 0;

	*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder) {
		if (!encoder->base.crtc)
			continue;

		crtc = to_intel_crtc(encoder->base.crtc);

		if (crtc->pipe != pipe)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_TVOUT:
			*source = INTEL_PIPE_CRC_SOURCE_TV;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_EDP:
			dig_port = enc_to_dig_port(&encoder->base);
			switch (dig_port->port) {
			case PORT_B:
				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
				break;
			case PORT_C:
				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
				break;
			case PORT_D:
				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
				break;
			default:
				WARN(1, "nonexisting DP port %c\n",
				     port_name(dig_port->port));
				break;
			}
			break;
		default:
			break;
		}
	}
	drm_modeset_unlock_all(dev);

	return ret;
}

static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}

static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);

	drm_modeset_lock_all(dev);
	/*
	 * If we use the eDP transcoder we need to make sure that we don't
	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
	 * relevant on hsw with pipe A when using the always-on power well
	 * routing.
	 */
	if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
	    !crtc->config.pch_pfit.enabled) {
		crtc->config.pch_pfit.force_thru = true;

		intel_display_power_get(dev_priv,
					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));

		dev_priv->display.crtc_disable(&crtc->base);
		dev_priv->display.crtc_enable(&crtc->base);
	}
	drm_modeset_unlock_all(dev);
}

static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);

	drm_modeset_lock_all(dev);
	/*
	 * If we use the eDP transcoder we need to make sure that we don't
	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
	 * relevant on hsw with pipe A when using the always-on power well
	 * routing.
	 */
	if (crtc->config.pch_pfit.force_thru) {
		crtc->config.pch_pfit.force_thru = false;

		dev_priv->display.crtc_disable(&crtc->base);
		dev_priv->display.crtc_enable(&crtc->base);

		intel_display_power_put(dev_priv,
					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
	}
	drm_modeset_unlock_all(dev);
}
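
/*
 * Note that both workaround helpers above go through a full crtc
 * disable/enable cycle: the force_thru flag is only picked up when the pipe
 * is brought up again, so toggling the flag alone would not reroute the
 * panel fitter.
 */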

static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
					    INTEL_PIPE_CRC_ENTRIES_NR,
					    GFP_KERNEL);
		if (!pipe_crc->entries)
			return -ENOMEM;

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_undo_trans_edp_pipe_A_crc_wa(dev);

		hsw_enable_ips(crtc);
	}

	return 0;
}
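
/*
 * In short: "none" -> <source> allocates the entries ring, disables IPS and
 * arms PIPE_CRC_CTL; <source> -> "none" waits for a final vblank, frees the
 * ring, undoes the scramble-reset/eDP workarounds and re-enables IPS.
 * Switching directly between two real sources is rejected with -EINVAL, so
 * userspace has to go through "none" in between.
 */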

/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
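
/*
 * For example, tokenizing the (writable) buffer "pipe A none" yields
 * n_words == 3 with words[] == { "pipe", "A", "none" }; the buffer is
 * modified in place, with each word NUL-terminated.
 */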

enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}
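
/*
 * Worked example: writing "pipe B pf" ends up here as
 * words[] == { "pipe", "B", "pf" }, which parses to
 * object == PIPE_CRC_OBJECT_PIPE, pipe == PIPE_B and
 * source == INTEL_PIPE_CRC_SOURCE_PF before being handed to
 * pipe_crc_set_source().
 */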

static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}

static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};

static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_device *dev = m->private;
	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9
		 */
		if (INTEL_INFO(dev)->gen >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
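
/*
 * Both the raw value and the decoded latency are printed: with the pre-gen9
 * encoding, a stored WM1 value of 4 (in 0.5us units) shows up as
 * "WM1 4 (2.0 usec)", while gen9+ values are stored directly in
 * microseconds.
 */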

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[8] = { 0 };
	int num_levels = ilk_wm_max_level(dev) + 1;
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
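
/*
 * The write must supply exactly ilk_wm_max_level() + 1 values, in the raw
 * hardware units described above.  A minimal sketch, assuming a part with
 * four watermark levels and debugfs mounted at /sys/kernel/debug:
 *
 *   # echo "7 4 8 16" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */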

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)

static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	else
		*val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = dev_priv->rps.max_freq;
		hw_min = dev_priv->rps.min_freq;
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	else
		*val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rp_state_cap, hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = dev_priv->rps.max_freq;
		hw_min = dev_priv->rps.min_freq;
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.max_freq;
		hw_min = (rp_state_cap >> 16) & 0xff;
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
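
/*
 * Forcewake is grabbed on open and dropped on release, so simply keeping the
 * file open holds the GT awake for register access.  A shell sketch
 * (assuming debugfs mounted at /sys/kernel/debug and DRM minor 0):
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user   # grab forcewake
 *   ... poke registers ...
 *   exec 3<&-                                             # release it
 */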

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
};

void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
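
/*
 * Registration order: the forcewake file first, then one CRC file per pipe,
 * then the writable special-purpose files from i915_debugfs_files[], and
 * finally the read-only seq_file entries from i915_debugfs_list[].  All of
 * them end up in the per-minor debugfs directory (typically
 * /sys/kernel/debug/dri/<minor>/).
 */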

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}