/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */

/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
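
/*
 * Illustrative sketch (not part of the driver): a minimal userspace open of
 * an OA stream via the interface described above. The property IDs, flags,
 * struct and ioctl names are those declared in uapi/drm/i915_drm.h; the
 * drm_fd, metrics_set_id and exponent values are arbitrary placeholders and
 * error handling is omitted.
 *
 *	u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(u64)),
 *		.properties_ptr = (__u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be read() for a sequence of records, each led by
 * a struct drm_i915_perf_record_header.
 */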

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based, the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature: there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality, we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
#include "i915_oa_icl.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE SZ_16M

#define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1))
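
/*
 * Worked example (illustrative, not part of the driver): because
 * OA_BUFFER_SIZE is a power of two, the masked subtraction in OA_TAKEN()
 * also handles the tail wrapping back past the end of the buffer. With the
 * 16M buffer (mask 0xffffff), head = 0xffff80 and a wrapped tail = 0x100:
 *
 *	(0x100 - 0xffff80) & 0xffffff == 0x180
 *
 * i.e. the 0x80 bytes up to the end of the buffer plus the 0x100 bytes from
 * the start: 384 bytes of unread reports in total.
 */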

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within an
 * hrtimer callback (the same callback that is used for delivering EPOLLIN
 * events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
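
/*
 * Illustrative timeline of the two-tail scheme described above (sketch only,
 * against successive hrtimer callbacks):
 *
 *	t0: hw tail advances      -> tails[!aged_idx] = hw_tail, aging starts
 *	t1: < OA_TAIL_MARGIN_NSEC since t0 -> keep waiting
 *	t2: >= OA_TAIL_MARGIN_NSEC since t0 -> aged_idx flips; the aged tail
 *	    becomes available to read()s and tails[!aged_idx] is reset to
 *	    INVALID_TAIL_PTR, ready to start aging the next hw tail.
 *
 * read()s only ever consume reports up to the aged tail.
 */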
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
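
/*
 * Illustrative sketch of the exponent -> period relationship: an exponent
 * selects a period of 2^(exponent + 1) timestamp ticks. Assuming, e.g.,
 * Haswell's 12.5MHz timestamp (80ns per tick):
 *
 *	exponent 0  -> 2 ticks     = 160ns between reports
 *	exponent 16 -> 2^17 ticks ~= 10.5ms between reports
 *	exponent 31 -> 2^32 ticks ~= 5.7 minutes between reports
 *
 * so OA_EXPONENT_MAX also keeps the period within what the 32bit report
 * timestamps can represent without ambiguity.
 */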
#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK       0x3f
#define OAREPORT_REASON_SHIFT      19
#define OAREPORT_REASON_TIMER      (1<<0)
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_CLK_RATIO  (1<<5)

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	   = { 0, 64 },
	[I915_OA_FORMAT_A29]	   = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	   = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	   = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

#define SAMPLE_OA_REPORT (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};

static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	/* The register lists may hold ERR_PTR() values if an allocation
	 * failed part way through creating the config, so only kfree()
	 * valid pointers (kfree(NULL) is a harmless no-op).
	 */
	if (!IS_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!IS_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!IS_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.oa.test_config;
		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}

static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset) if there is
	 * currently a read() in progress.
	 */
	head = dev_priv->perf.oa.oa_buffer.head;

	aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
	       false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;

	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
		     dev_priv->perf.oa.specific_ctx_id) ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.oa.exclusive_stream->ctx &&
			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
  839. static int gen7_append_oa_reports(struct i915_perf_stream *stream,
  840. char __user *buf,
  841. size_t count,
  842. size_t *offset)
  843. {
  844. struct drm_i915_private *dev_priv = stream->dev_priv;
  845. int report_size = dev_priv->perf.oa.oa_buffer.format_size;
  846. u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
  847. u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
  848. u32 mask = (OA_BUFFER_SIZE - 1);
  849. size_t start_offset = *offset;
  850. unsigned long flags;
  851. unsigned int aged_tail_idx;
  852. u32 head, tail;
  853. u32 taken;
  854. int ret = 0;
  855. if (WARN_ON(!stream->enabled))
  856. return -EIO;
  857. spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
  858. head = dev_priv->perf.oa.oa_buffer.head;
  859. aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
  860. tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
  861. spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
  862. /* An invalid tail pointer here means we're still waiting for the poll
  863. * hrtimer callback to give us a pointer
  864. */
  865. if (tail == INVALID_TAIL_PTR)
  866. return -EAGAIN;
  867. /* NB: oa_buffer.head/tail include the gtt_offset which we don't want
  868. * while indexing relative to oa_buf_base.
  869. */
  870. head -= gtt_offset;
  871. tail -= gtt_offset;
  872. /* An out of bounds or misaligned head or tail pointer implies a driver
  873. * bug since we validate + align the tail pointers we read from the
  874. * hardware and we are in full control of the head pointer which should
  875. * only be incremented by multiples of the report size (notably also
  876. * all a power of two).
  877. */
  878. if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
  879. tail > OA_BUFFER_SIZE || tail % report_size,
  880. "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
  881. head, tail))
  882. return -EIO;
  883. for (/* none */;
  884. (taken = OA_TAKEN(tail, head));
  885. head = (head + report_size) & mask) {
  886. u8 *report = oa_buf_base + head;
  887. u32 *report32 = (void *)report;
  888. /* All the report sizes factor neatly into the buffer
  889. * size so we never expect to see a report split
  890. * between the beginning and end of the buffer.
  891. *
  892. * Given the initial alignment check a misalignment
  893. * here would imply a driver bug that would result
  894. * in an overrun.
  895. */
  896. if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
  897. DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
  898. break;
  899. }
  900. /* The report-ID field for periodic samples includes
  901. * some undocumented flags related to what triggered
  902. * the report and is never expected to be zero so we
  903. * can check that the report isn't invalid before
  904. * copying it to userspace...
  905. */
  906. if (report32[0] == 0) {
  907. if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
  908. DRM_NOTE("Skipping spurious, invalid OA report\n");
  909. continue;
  910. }
  911. ret = append_oa_sample(stream, buf, count, offset, report);
  912. if (ret)
  913. break;
  914. /* The above report-id field sanity check is based on
  915. * the assumption that the OA buffer is initially
  916. * zeroed and we reset the field after copying so the
  917. * check is still meaningful once old reports start
  918. * being overwritten.
  919. */
  920. report32[0] = 0;
  921. }
  922. if (start_offset != *offset) {
  923. spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
  924. /* We removed the gtt_offset for the copy loop above, indexing
  925. * relative to oa_buf_base so put back here...
  926. */
  927. head += gtt_offset;
  928. I915_WRITE(GEN7_OASTATUS2,
  929. ((head & GEN7_OASTATUS2_HEAD_MASK) |
  930. GEN7_OASTATUS2_MEM_SELECT_GGTT));
  931. dev_priv->perf.oa.oa_buffer.head = head;
  932. spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
  933. }
  934. return ret;
  935. }
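/*
 * Aside (illustrative sketch, not driver code): the copy loop above relies
 * on the usual power-of-two ring arithmetic, assuming OA_TAKEN() reduces to
 * ((tail - head) & (OA_BUFFER_SIZE - 1)). A minimal standalone model:
 *
 *      enum { RING_SIZE = 16 };        // stand-in for OA_BUFFER_SIZE
 *
 *      static unsigned int ring_taken(unsigned int tail, unsigned int head)
 *      {
 *              // Unsigned subtraction wraps, and masking with size - 1
 *              // recovers the byte count even when tail < head.
 *              return (tail - head) & (RING_SIZE - 1);
 *      }
 *
 * E.g. with head == 14 and tail == 2 the data wrapped past the end of the
 * buffer: (2 - 14) underflows, but masking yields ring_taken() == 4.
 */
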
/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
                        char __user *buf,
                        size_t count,
                        size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        u32 oastatus1;
        int ret;

        if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
                return -EIO;

        oastatus1 = I915_READ(GEN7_OASTATUS1);

        /* XXX: On Haswell we don't have a safe way to clear oastatus1
         * bits while the OA unit is enabled (while the tail pointer
         * may be updated asynchronously) so we ignore status bits
         * that have already been reported to userspace.
         */
        oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

        /* We treat OABUFFER_OVERFLOW as a significant error:
         *
         * - The status can be interpreted to mean that the buffer is
         *   currently full (with a higher precedence than OA_TAKEN()
         *   which will start to report a near-empty buffer after an
         *   overflow) but it's awkward that we can't clear the status
         *   on Haswell, so without a reset we won't be able to catch
         *   the state again.
         *
         * - Since it also implies the HW has started overwriting old
         *   reports it may also affect our sanity checks for invalid
         *   reports when copying to userspace that assume new reports
         *   are being written to cleared memory.
         *
         * - In the future we may want to introduce a flight recorder
         *   mode where the driver will automatically maintain a safe
         *   guard band between head/tail, avoiding this overflow
         *   condition, but we avoid the added driver complexity for
         *   now.
         */
        if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
                if (ret)
                        return ret;

                DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
                          dev_priv->perf.oa.period_exponent);

                dev_priv->perf.oa.ops.oa_disable(dev_priv);
                dev_priv->perf.oa.ops.oa_enable(dev_priv);

                oastatus1 = I915_READ(GEN7_OASTATUS1);
        }

        if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
                if (ret)
                        return ret;
                dev_priv->perf.oa.gen7_latched_oastatus1 |=
                        GEN7_OASTATUS1_REPORT_LOST;
        }

        return gen7_append_oa_reports(stream, buf, count, offset);
}

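/*
 * Aside (sketch of the pattern above, not additional driver code): the
 * gen7_latched_oastatus1 handling is the classic "latch what we've already
 * reported" idiom for status bits that can't be cleared while the unit runs:
 *
 *      u32 fresh = hw_status & ~latched;       // bits not yet seen
 *      report_to_userspace(fresh);             // hypothetical reporting step
 *      latched |= fresh;                       // never report them twice
 *
 * The latch is only dropped back to zero when the whole unit is
 * reinitialized; see gen7_init_oa_buffer() below.
 */
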
/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        /* We would wait indefinitely if periodic sampling is not enabled */
        if (!dev_priv->perf.oa.periodic)
                return -EIO;

        return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
                                        oa_buffer_check_unlocked(dev_priv));
}

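/*
 * Aside (simplified expansion, from the core kernel wait API rather than
 * this file): wait_event_interruptible(wq, cond) conceptually behaves as:
 *
 *      for (;;) {
 *              prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *              if (cond)
 *                      break;                  // data (maybe) ready
 *              if (signal_pending(current)) {
 *                      ret = -ERESTARTSYS;     // interrupted by a signal
 *                      break;
 *              }
 *              schedule();                     // sleep until woken
 *      }
 *      finish_wait(&wq, &wait);
 *
 * so the condition is re-evaluated after every wakeup, which is why the
 * false positives mentioned above are harmless here.
 */
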
/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
                              struct file *file,
                              poll_table *wait)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
                        char __user *buf,
                        size_t count,
                        size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
                                            struct i915_gem_context *ctx)
{
        struct intel_engine_cs *engine = i915->engine[RCS];
        struct intel_context *ce;
        int ret;

        ret = i915_mutex_lock_interruptible(&i915->drm);
        if (ret)
                return ERR_PTR(ret);

        /*
         * As the ID is the gtt offset of the context's vma we
         * pin the vma to ensure the ID remains fixed.
         *
         * NB: implied RCS engine...
         */
        ce = intel_context_pin(ctx, engine);
        mutex_unlock(&i915->drm.struct_mutex);
        if (IS_ERR(ce))
                return ce;

        i915->perf.oa.pinned_ctx = ce;

        return ce;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
        struct drm_i915_private *i915 = stream->dev_priv;
        struct intel_context *ce;

        ce = oa_pin_context(i915, stream->ctx);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        switch (INTEL_GEN(i915)) {
        case 7: {
                /*
                 * On Haswell we don't do any post processing of the reports
                 * and don't need to use the mask.
                 */
                i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
                i915->perf.oa.specific_ctx_id_mask = 0;
                break;
        }

        case 8:
        case 9:
        case 10:
                if (USES_GUC_SUBMISSION(i915)) {
                        /*
                         * When using GuC, the context descriptor we write in
                         * i915 is read by GuC and rewritten before it's
                         * actually written into the hardware. The LRCA is
                         * what is put into the context id field of the
                         * context descriptor by GuC. Because it's aligned to
                         * a page, the lower 12 bits are always 0 and
                         * dropped by GuC. They won't be part of the context
                         * ID in the OA reports, so squash those lower bits.
                         */
                        i915->perf.oa.specific_ctx_id =
                                lower_32_bits(ce->lrc_desc) >> 12;

                        /*
                         * GuC uses the top bit to signal proxy submission, so
                         * ignore that bit.
                         */
                        i915->perf.oa.specific_ctx_id_mask =
                                (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
                } else {
                        i915->perf.oa.specific_ctx_id_mask =
                                (1U << GEN8_CTX_ID_WIDTH) - 1;
                        i915->perf.oa.specific_ctx_id =
                                upper_32_bits(ce->lrc_desc);
                        i915->perf.oa.specific_ctx_id &=
                                i915->perf.oa.specific_ctx_id_mask;
                }
                break;

        case 11: {
                i915->perf.oa.specific_ctx_id_mask =
                        ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
                        ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
                        ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
                i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
                i915->perf.oa.specific_ctx_id &=
                        i915->perf.oa.specific_ctx_id_mask;
                break;
        }

        default:
                MISSING_CASE(INTEL_GEN(i915));
        }

        DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
                         i915->perf.oa.specific_ctx_id,
                         i915->perf.oa.specific_ctx_id_mask);

        return 0;
}

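/*
 * Aside (hypothetical helper, not driver code): with the (id, mask) pair
 * derived above, the per-context filtering done on the CPU for gen8+
 * presumably reduces to a masked comparison against the context-ID field
 * of each report:
 *
 *      static bool ctx_matches(u32 report_ctx_id, u32 id, u32 mask)
 *      {
 *              // specific_ctx_id is already stored pre-masked above
 *              return (report_ctx_id & mask) == id;
 *      }
 *
 * On Haswell the mask is 0 because the OA unit itself is configured for a
 * single context, so no CPU-side comparison is needed.
 */
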
/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        struct intel_context *ce;

        dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
        dev_priv->perf.oa.specific_ctx_id_mask = 0;

        ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
        if (ce) {
                mutex_lock(&dev_priv->drm.struct_mutex);
                intel_context_unpin(ce);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }
}

static void
free_oa_buffer(struct drm_i915_private *i915)
{
        mutex_lock(&i915->drm.struct_mutex);

        i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
        i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
        i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

        i915->perf.oa.oa_buffer.vma = NULL;
        i915->perf.oa.oa_buffer.vaddr = NULL;

        mutex_unlock(&i915->drm.struct_mutex);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

        /*
         * Unset exclusive_stream first, it will be checked while disabling
         * the metric set on gen8+.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
        dev_priv->perf.oa.exclusive_stream = NULL;
        dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);

        free_oa_buffer(dev_priv);

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        intel_runtime_pm_put(dev_priv);

        if (stream->ctx)
                oa_put_render_ctx_id(stream);

        put_oa_config(dev_priv, stream->oa_config);

        if (dev_priv->perf.oa.spurious_report_rs.missed) {
                DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
                         dev_priv->perf.oa.spurious_report_rs.missed);
        }
}

static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /* Pre-DevBDW: OABUFFER must be set with counters off,
         * before OASTATUS1, but after OASTATUS2
         */
        I915_WRITE(GEN7_OASTATUS2,
                   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
        dev_priv->perf.oa.oa_buffer.head = gtt_offset;

        I915_WRITE(GEN7_OABUFFER, gtt_offset);

        I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

        /* Mark that we need updated tail pointers to read from... */
        dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
        dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /* On Haswell we have to track which OASTATUS1 flags we've
         * already seen since they can't be cleared while periodic
         * sampling is enabled.
         */
        dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

        /* NB: although the OA buffer will initially be allocated
         * zeroed via shmfs (and so this memset is redundant when
         * first allocating), we may re-init the OA buffer, either
         * when re-enabling a stream or in error/reset paths.
         *
         * The reason we clear the buffer for each re-init is for the
         * sanity check in gen7_append_oa_reports() that looks at the
         * report-id field to make sure it's non-zero which relies on
         * the assumption that new reports are being written to zeroed
         * memory...
         */
        memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

        /* Maybe make ->pollin per-stream state if we support multiple
         * concurrent streams in the future.
         */
        dev_priv->perf.oa.pollin = false;
}

static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
{
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        I915_WRITE(GEN8_OASTATUS, 0);
        I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
        dev_priv->perf.oa.oa_buffer.head = gtt_offset;

        I915_WRITE(GEN8_OABUFFER_UDW, 0);

        /*
         * PRM says:
         *
         *  "This MMIO must be set before the OATAILPTR
         *  register and after the OAHEADPTR register. This is
         *  to enable proper functionality of the overflow
         *  bit."
         */
        I915_WRITE(GEN8_OABUFFER, gtt_offset |
                   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
        I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

        /* Mark that we need updated tail pointers to read from... */
        dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
        dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

        /*
         * Reset state used to recognise context switches, affecting which
         * reports we will forward to userspace while filtering for a single
         * context.
         */
        dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /*
         * NB: although the OA buffer will initially be allocated
         * zeroed via shmfs (and so this memset is redundant when
         * first allocating), we may re-init the OA buffer, either
         * when re-enabling a stream or in error/reset paths.
         *
         * The reason we clear the buffer for each re-init is for the
         * sanity check in gen8_append_oa_reports() that looks at the
         * reason field to make sure it's non-zero which relies on
         * the assumption that new reports are being written to zeroed
         * memory...
         */
        memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

        /*
         * Maybe make ->pollin per-stream state if we support multiple
         * concurrent streams in the future.
         */
        dev_priv->perf.oa.pollin = false;
}

static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *bo;
        struct i915_vma *vma;
        int ret;

        if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
                return -ENODEV;

        ret = i915_mutex_lock_interruptible(&dev_priv->drm);
        if (ret)
                return ret;

        BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
        BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

        bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
        if (IS_ERR(bo)) {
                DRM_ERROR("Failed to allocate OA buffer\n");
                ret = PTR_ERR(bo);
                goto unlock;
        }

        ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
        if (ret)
                goto err_unref;

        /* PreHSW required 512K alignment, HSW requires 16M */
        vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
        }
        dev_priv->perf.oa.oa_buffer.vma = vma;

        dev_priv->perf.oa.oa_buffer.vaddr =
                i915_gem_object_pin_map(bo, I915_MAP_WB);
        if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
                ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
                goto err_unpin;
        }

        dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);

        DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
                         i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
                         dev_priv->perf.oa.oa_buffer.vaddr);

        goto unlock;

err_unpin:
        __i915_vma_unpin(vma);

err_unref:
        i915_gem_object_put(bo);

        dev_priv->perf.oa.oa_buffer.vaddr = NULL;
        dev_priv->perf.oa.oa_buffer.vma = NULL;

unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}

static void config_oa_regs(struct drm_i915_private *dev_priv,
                           const struct i915_oa_reg *regs,
                           u32 n_regs)
{
        u32 i;

        for (i = 0; i < n_regs; i++) {
                const struct i915_oa_reg *reg = regs + i;

                I915_WRITE(reg->addr, reg->value);
        }
}

static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
                                 const struct i915_oa_config *oa_config)
{
        /* PRM:
         *
         * OA unit is using “crclk” for its functionality. When trunk
         * level clock gating takes place, OA clock would be gated,
         * unable to count the events from non-render clock domain.
         * Render clock gating must be disabled when OA is enabled to
         * count the events from non-render domain. Unit level clock
         * gating for RCS should also be disabled.
         */
        I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                    ~GEN7_DOP_CLOCK_GATE_ENABLE));
        I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
                                  GEN6_CSUNIT_CLOCK_GATE_DISABLE));

        config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

        /* It apparently takes a fairly long time for a new MUX
         * configuration to be applied after these register writes.
         * This delay duration was derived empirically based on the
         * render_basic config but hopefully it covers the maximum
         * configuration latency.
         *
         * As a fallback, the checks in _append_oa_reports() to skip
         * invalid OA reports do also seem to work to discard reports
         * generated before this config has completed - albeit not
         * silently.
         *
         * Unfortunately this is essentially a magic number, since we
         * don't currently know of a reliable mechanism for predicting
         * how long the MUX config will take to apply and besides
         * seeing invalid reports we don't know of a reliable way to
         * explicitly check that the MUX config has landed.
         *
         * It's even possible we've mischaracterized the underlying
         * problem - it just seems like the simplest explanation why
         * a delay at this location would mitigate any invalid reports.
         */
        usleep_range(15000, 20000);

        config_oa_regs(dev_priv, oa_config->b_counter_regs,
                       oa_config->b_counter_regs_len);

        return 0;
}

static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
{
        I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
                                  ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
        I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
                                    GEN7_DOP_CLOCK_GATE_ENABLE));

        I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
                                      ~GT_NOA_ENABLE));
}

/*
 * NB: It must always remain pointer safe to run this even if the OA unit
 * has been disabled.
 *
 * It's fine to put out-of-date values into these per-context registers
 * in the case that the OA unit has been disabled.
 */
static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
                                           u32 *reg_state,
                                           const struct i915_oa_config *oa_config)
{
        struct drm_i915_private *dev_priv = ctx->i915;
        u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
        u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
        /* The MMIO offsets for Flex EU registers aren't contiguous */
        u32 flex_mmio[] = {
                i915_mmio_reg_offset(EU_PERF_CNTL0),
                i915_mmio_reg_offset(EU_PERF_CNTL1),
                i915_mmio_reg_offset(EU_PERF_CNTL2),
                i915_mmio_reg_offset(EU_PERF_CNTL3),
                i915_mmio_reg_offset(EU_PERF_CNTL4),
                i915_mmio_reg_offset(EU_PERF_CNTL5),
                i915_mmio_reg_offset(EU_PERF_CNTL6),
        };
        int i;

        reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
        reg_state[ctx_oactxctrl + 1] = (dev_priv->perf.oa.period_exponent <<
                                        GEN8_OA_TIMER_PERIOD_SHIFT) |
                                       (dev_priv->perf.oa.periodic ?
                                        GEN8_OA_TIMER_ENABLE : 0) |
                                       GEN8_OA_COUNTER_RESUME;

        for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
                u32 state_offset = ctx_flexeu0 + i * 2;
                u32 mmio = flex_mmio[i];

                /*
                 * This arbitrary default will select the 'EU FPU0 Pipeline
                 * Active' event. In the future it's anticipated that there
                 * will be an explicit 'No Event' we can select, but not yet...
                 */
                u32 value = 0;

                if (oa_config) {
                        u32 j;

                        for (j = 0; j < oa_config->flex_regs_len; j++) {
                                if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
                                        value = oa_config->flex_regs[j].value;
                                        break;
                                }
                        }
                }

                reg_state[state_offset] = mmio;
                reg_state[state_offset + 1] = value;
        }
}

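/*
 * Aside (pattern summary, derived from the function above): the register
 * state context is laid out as (MMIO offset, value) u32 pairs, so updating
 * one register amounts to:
 *
 *      static void write_ctx_reg(u32 *regs, u32 dword_offset,
 *                                u32 mmio_offset, u32 value)
 *      {
 *              regs[dword_offset] = mmio_offset;       // which register
 *              regs[dword_offset + 1] = value;         // value HW reloads
 *      }
 *
 * which is exactly what happens for OACTXCONTROL at ctx_oactxctrl and for
 * each EU_PERF_CNTLn slot at state_offset / state_offset + 1.
 */
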
/*
 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
 * is only used by the kernel context.
 */
static int gen8_emit_oa_config(struct i915_request *rq,
                               const struct i915_oa_config *oa_config)
{
        struct drm_i915_private *dev_priv = rq->i915;
        /* The MMIO offsets for Flex EU registers aren't contiguous */
        u32 flex_mmio[] = {
                i915_mmio_reg_offset(EU_PERF_CNTL0),
                i915_mmio_reg_offset(EU_PERF_CNTL1),
                i915_mmio_reg_offset(EU_PERF_CNTL2),
                i915_mmio_reg_offset(EU_PERF_CNTL3),
                i915_mmio_reg_offset(EU_PERF_CNTL4),
                i915_mmio_reg_offset(EU_PERF_CNTL5),
                i915_mmio_reg_offset(EU_PERF_CNTL6),
        };
        u32 *cs;
        int i;

        cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);

        *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
        *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
                (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
                GEN8_OA_COUNTER_RESUME;

        for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
                u32 mmio = flex_mmio[i];

                /*
                 * This arbitrary default will select the 'EU FPU0 Pipeline
                 * Active' event. In the future it's anticipated that there
                 * will be an explicit 'No Event' we can select, but not
                 * yet...
                 */
                u32 value = 0;

                if (oa_config) {
                        u32 j;

                        for (j = 0; j < oa_config->flex_regs_len; j++) {
                                if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
                                        value = oa_config->flex_regs[j].value;
                                        break;
                                }
                        }
                }

                *cs++ = mmio;
                *cs++ = value;
        }

        *cs++ = MI_NOOP;
        intel_ring_advance(rq, cs);

        return 0;
}

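/*
 * Dword accounting for the emission above, assuming the usual
 * MI_LOAD_REGISTER_IMM encoding of one header dword followed by
 * (offset, value) pairs:
 *
 *        1                                     // MI_LOAD_REGISTER_IMM header
 *      + 2 * (ARRAY_SIZE(flex_mmio) + 1)       // 7 flex regs + OACTXCONTROL
 *      + 1                                     // trailing MI_NOOP
 *      = 18 == ARRAY_SIZE(flex_mmio) * 2 + 4   // matches intel_ring_begin()
 */
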
static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
                                                 const struct i915_oa_config *oa_config)
{
        struct intel_engine_cs *engine = dev_priv->engine[RCS];
        struct i915_timeline *timeline;
        struct i915_request *rq;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        i915_retire_requests(dev_priv);

        rq = i915_request_alloc(engine, dev_priv->kernel_context);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = gen8_emit_oa_config(rq, oa_config);
        if (ret) {
                i915_request_add(rq);
                return ret;
        }

        /* Queue this switch after all other activity */
        list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
                struct i915_request *prev;

                prev = i915_gem_active_raw(&timeline->last_request,
                                           &dev_priv->drm.struct_mutex);
                if (prev)
                        i915_request_await_dma_fence(rq, &prev->fence);
        }

        i915_request_add(rq);

        return 0;
}

/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 */
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
                                       const struct i915_oa_config *oa_config)
{
        struct intel_engine_cs *engine = dev_priv->engine[RCS];
        struct i915_gem_context *ctx;
        int ret;
        unsigned int wait_flags = I915_WAIT_LOCKED;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        /* Switch away from any user context. */
        ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
        if (ret)
                goto out;

        /*
         * The OA register config is set up through the context image. This
         * image might be written to by the GPU on context switch (in
         * particular on lite-restore). This means we can't safely update a
         * context's image if this context is scheduled/submitted to run on
         * the GPU.
         *
         * We could emit the OA register config through the batch buffer but
         * this might leave a small interval of time where the OA unit is
         * configured at an invalid sampling period.
         *
         * So far the best way to work around this issue seems to be draining
         * the GPU from any submitted work.
         */
        ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
        if (ret)
                goto out;

        /* Update all contexts now that we've stalled the submission. */
        list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
                struct intel_context *ce = to_intel_context(ctx, engine);
                u32 *regs;

                /* OA settings will be set upon first use */
                if (!ce->state)
                        continue;

                regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
                if (IS_ERR(regs)) {
                        ret = PTR_ERR(regs);
                        goto out;
                }

                ce->state->obj->mm.dirty = true;
                regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);

                gen8_update_reg_state_unlocked(ctx, regs, oa_config);

                i915_gem_object_unpin_map(ce->state->obj);
        }

out:
        return ret;
}

static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
                                  const struct i915_oa_config *oa_config)
{
        int ret;

        /*
         * We disable slice/unslice clock ratio change reports on SKL since
         * they are too noisy. The HW generates a lot of redundant reports
         * where the ratio hasn't really changed, causing a lot of redundant
         * work for the processes reading them and increasing the chances
         * we'll hit buffer overruns.
         *
         * Although we don't currently use the 'disable overrun' OABUFFER
         * feature it's worth noting that clock ratio reports have to be
         * disabled before considering to use that feature since the HW doesn't
         * correctly block these reports.
         *
         * Currently none of the high-level metrics we have depend on knowing
         * this ratio to normalize.
         *
         * Note: This register is not power context saved and restored, but
         * that's OK considering that we disable RC6 while the OA unit is
         * enabled.
         *
         * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
         * be read back from automatically triggered reports, as part of the
         * RPT_ID field.
         */
        if (IS_GEN(dev_priv, 9, 11)) {
                I915_WRITE(GEN8_OA_DEBUG,
                           _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
                                              GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
        }

        /*
         * Update all contexts prior to writing the mux configurations as we
         * need to make sure all slices/subslices are ON before writing to NOA
         * registers.
         */
        ret = gen8_configure_all_contexts(dev_priv, oa_config);
        if (ret)
                return ret;

        config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

        config_oa_regs(dev_priv, oa_config->b_counter_regs,
                       oa_config->b_counter_regs_len);

        return 0;
}

static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
        /* Reset all contexts' slices/subslices configurations. */
        gen8_configure_all_contexts(dev_priv, NULL);

        I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
                                      ~GT_NOA_ENABLE));
}

static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
{
        /* Reset all contexts' slices/subslices configurations. */
        gen8_configure_all_contexts(dev_priv, NULL);

        /* Make sure we disable noa to save power. */
        I915_WRITE(RPM_CONFIG1,
                   I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
}

static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
        struct i915_gem_context *ctx =
                        dev_priv->perf.oa.exclusive_stream->ctx;
        u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
        bool periodic = dev_priv->perf.oa.periodic;
        u32 period_exponent = dev_priv->perf.oa.period_exponent;
        u32 report_format = dev_priv->perf.oa.oa_buffer.format;

        /*
         * Reset buf pointers so we don't forward reports from before now.
         *
         * Think carefully if considering trying to avoid this, since it
         * also ensures status flags and the buffer itself are cleared
         * in error paths, and we have checks for invalid reports based
         * on the assumption that certain fields are written to zeroed
         * memory, which this helps maintain.
         */
        gen7_init_oa_buffer(dev_priv);

        I915_WRITE(GEN7_OACONTROL,
                   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
                   (period_exponent <<
                    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
                   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
                   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
                   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
                   GEN7_OACONTROL_ENABLE);
}

static void gen8_oa_enable(struct drm_i915_private *dev_priv)
{
        u32 report_format = dev_priv->perf.oa.oa_buffer.format;

        /*
         * Reset buf pointers so we don't forward reports from before now.
         *
         * Think carefully if considering trying to avoid this, since it
         * also ensures status flags and the buffer itself are cleared
         * in error paths, and we have checks for invalid reports based
         * on the assumption that certain fields are written to zeroed
         * memory, which this helps maintain.
         */
        gen8_init_oa_buffer(dev_priv);

        /*
         * Note: we don't rely on the hardware to perform single context
         * filtering and instead filter on the cpu based on the context-id
         * field of reports
         */
        I915_WRITE(GEN8_OACONTROL, (report_format <<
                                    GEN8_OA_REPORT_FORMAT_SHIFT) |
                                   GEN8_OA_COUNTER_ENABLE);
}

/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
 */
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        dev_priv->perf.oa.ops.oa_enable(dev_priv);

        if (dev_priv->perf.oa.periodic)
                hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
                              ns_to_ktime(POLL_PERIOD),
                              HRTIMER_MODE_REL_PINNED);
}

static void gen7_oa_disable(struct drm_i915_private *dev_priv)
{
        I915_WRITE(GEN7_OACONTROL, 0);
        if (intel_wait_for_register(dev_priv,
                                    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
                                    50))
                DRM_ERROR("wait for OA to be disabled timed out\n");
}

static void gen8_oa_disable(struct drm_i915_private *dev_priv)
{
        I915_WRITE(GEN8_OACONTROL, 0);
        if (intel_wait_for_register(dev_priv,
                                    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
                                    50))
                DRM_ERROR("wait for OA to be disabled timed out\n");
}

/**
 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * Stops the OA unit from periodically writing counter reports into the
 * circular OA buffer. This also stops the hrtimer that periodically checks for
 * data in the circular OA buffer, for notifying userspace.
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        dev_priv->perf.oa.ops.oa_disable(dev_priv);

        if (dev_priv->perf.oa.periodic)
                hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
}

static const struct i915_perf_stream_ops i915_oa_stream_ops = {
        .destroy = i915_oa_stream_destroy,
        .enable = i915_oa_stream_enable,
        .disable = i915_oa_stream_disable,
        .wait_unlocked = i915_oa_wait_unlocked,
        .poll_wait = i915_oa_poll_wait,
        .read = i915_oa_read,
};

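/*
 * Read-path call flow implied by the ops table above (HSW shown; gen8+
 * substitutes its own oa.ops.read backend):
 *
 *      i915_perf_read()                                // file op, below
 *        -> i915_perf_read_locked()
 *             -> stream->ops->read()                   == i915_oa_read()
 *                  -> dev_priv->perf.oa.ops.read()     == gen7_oa_read()
 *                       -> gen7_append_oa_reports()
 */
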
/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but still we need to further validate the combined
 * properties are OK.
 *
 * If the configuration makes sense then we can allocate memory for
 * a circular OA buffer and apply the requested metric set configuration.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_oa_stream_init(struct i915_perf_stream *stream,
                               struct drm_i915_perf_open_param *param,
                               struct perf_open_properties *props)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int format_size;
        int ret;

        /* If the sysfs metrics/ directory wasn't registered for some
         * reason then don't let userspace try their luck with config
         * IDs
         */
        if (!dev_priv->perf.metrics_kobj) {
                DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
                return -EINVAL;
        }

        if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
                DRM_DEBUG("Only OA report sampling supported\n");
                return -EINVAL;
        }

        if (!dev_priv->perf.oa.ops.init_oa_buffer) {
                DRM_DEBUG("OA unit not supported\n");
                return -ENODEV;
        }

        /* To avoid the complexity of having to accurately filter
         * counter reports and marshal to the appropriate client
         * we currently only allow exclusive access
         */
        if (dev_priv->perf.oa.exclusive_stream) {
                DRM_DEBUG("OA unit already in use\n");
                return -EBUSY;
        }

        if (!props->oa_format) {
                DRM_DEBUG("OA report format not specified\n");
                return -EINVAL;
        }

        /* We set up some ratelimit state to potentially throttle any _NOTES
         * about spurious, invalid OA reports which we don't forward to
         * userspace.
         *
         * The initialization is associated with opening the stream (not driver
         * init) considering we print a _NOTE about any throttling when closing
         * the stream instead of waiting until driver _fini which no one would
         * ever see.
         *
         * Using the same limiting factors as printk_ratelimit()
         */
        ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
                             5 * HZ, 10);
        /* Since we use a DRM_NOTE for spurious reports it would be
         * inconsistent to let __ratelimit() automatically print a warning for
         * throttling.
         */
        ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
                            RATELIMIT_MSG_ON_RELEASE);

        stream->sample_size = sizeof(struct drm_i915_perf_record_header);

        format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;

        stream->sample_flags |= SAMPLE_OA_REPORT;
        stream->sample_size += format_size;

        dev_priv->perf.oa.oa_buffer.format_size = format_size;
        if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
                return -EINVAL;

        dev_priv->perf.oa.oa_buffer.format =
                dev_priv->perf.oa.oa_formats[props->oa_format].format;

        dev_priv->perf.oa.periodic = props->oa_periodic;
        if (dev_priv->perf.oa.periodic)
                dev_priv->perf.oa.period_exponent = props->oa_period_exponent;

        if (stream->ctx) {
                ret = oa_get_render_ctx_id(stream);
                if (ret) {
                        DRM_DEBUG("Invalid context id to filter with\n");
                        return ret;
                }
        }

        ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
        if (ret) {
                DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
                goto err_config;
        }

        /* PRM - observability performance counters:
         *
         *   OACONTROL, performance counter enable, note:
         *
         *   "When this bit is set, in order to have coherent counts,
         *   RC6 power state and trunk clock gating must be disabled.
         *   This can be achieved by programming MMIO registers as
         *   0xA094=0 and 0xA090[31]=1"
         *
         *   In our case we are expecting that taking pm + FORCEWAKE
         *   references will effectively disable RC6.
         */
        intel_runtime_pm_get(dev_priv);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

        ret = alloc_oa_buffer(dev_priv);
        if (ret)
                goto err_oa_buf_alloc;

        ret = i915_mutex_lock_interruptible(&dev_priv->drm);
        if (ret)
                goto err_lock;

        ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
                                                      stream->oa_config);
        if (ret) {
                DRM_DEBUG("Unable to enable metric set\n");
                goto err_enable;
        }

        stream->ops = &i915_oa_stream_ops;

        dev_priv->perf.oa.exclusive_stream = stream;

        mutex_unlock(&dev_priv->drm.struct_mutex);

        return 0;

err_enable:
        dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);

err_lock:
        free_oa_buffer(dev_priv);

err_oa_buf_alloc:
        put_oa_config(dev_priv, stream->oa_config);

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        intel_runtime_pm_put(dev_priv);

err_config:
        if (stream->ctx)
                oa_put_render_ctx_id(stream);

        return ret;
}

void i915_oa_init_reg_state(struct intel_engine_cs *engine,
                            struct i915_gem_context *ctx,
                            u32 *reg_state)
{
        struct i915_perf_stream *stream;

        if (engine->id != RCS)
                return;

        stream = engine->i915->perf.oa.exclusive_stream;
        if (stream)
                gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
}

/**
 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
 * ensure that if we've successfully copied any data then reporting that takes
 * precedence over any internal error status, so the data isn't lost.
 *
 * For example ret will be -ENOSPC whenever there is more buffered data than
 * can be copied to userspace, but that's only interesting if we weren't able
 * to copy some data because it implies the userspace buffer is too small to
 * receive a single record (and we never split records).
 *
 * Another case with ret == -EFAULT is more of a grey area since it would seem
 * like bad form for userspace to ask us to overrun its buffer, but the user
 * knows best:
 *
 *   http://yarchive.net/comp/linux/partial_reads_writes.html
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
                                     struct file *file,
                                     char __user *buf,
                                     size_t count,
                                     loff_t *ppos)
{
        /* Note we keep the offset (aka bytes read) separate from any
         * error status so that the final check for whether we return
         * the bytes read with a higher precedence than any error (see
         * comment below) doesn't need to be handled/duplicated in
         * stream->ops->read() implementations.
         */
        size_t offset = 0;
        int ret = stream->ops->read(stream, buf, count, &offset);

        return offset ?: (ret ?: -EAGAIN);
}

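/*
 * Worked examples for the return expression above (the GNU a ?: b extension
 * evaluates to a whenever a is non-zero):
 *
 *      offset == 64, ret == -ENOSPC  ->  64            // partial data wins
 *      offset == 0,  ret == -EFAULT  ->  -EFAULT       // nothing copied
 *      offset == 0,  ret == 0        ->  -EAGAIN       // no data, no error
 */
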
/**
 * i915_perf_read - handles read() FOP for i915 perf stream FDs
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * The entry point for handling a read() on a stream file descriptor from
 * userspace. Most of the work is left to i915_perf_read_locked() and
 * &i915_perf_stream_ops->read, but to save stream implementations (of which
 * we might have multiple later) from each handling blocking reads, we handle
 * blocking here.
 *
 * We can also consistently treat trying to read from a disabled stream
 * as an IO error so implementations can assume the stream is enabled
 * while reading.
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read(struct file *file,
                              char __user *buf,
                              size_t count,
                              loff_t *ppos)
{
        struct i915_perf_stream *stream = file->private_data;
        struct drm_i915_private *dev_priv = stream->dev_priv;
        ssize_t ret;

        /* To ensure it's handled consistently we simply treat all reads of a
         * disabled stream as an error. In particular it might otherwise lead
         * to a deadlock for blocking file descriptors...
         */
        if (!stream->enabled)
                return -EIO;

        if (!(file->f_flags & O_NONBLOCK)) {
                /* There's the small chance of false positives from
                 * stream->ops->wait_unlocked.
                 *
                 * E.g. with single context filtering since we only wait until
                 * oabuffer has >= 1 report we don't immediately know whether
                 * any reports really belong to the current context
                 */
                do {
                        ret = stream->ops->wait_unlocked(stream);
                        if (ret)
                                return ret;

                        mutex_lock(&dev_priv->perf.lock);
                        ret = i915_perf_read_locked(stream, file,
                                                    buf, count, ppos);
                        mutex_unlock(&dev_priv->perf.lock);
                } while (ret == -EAGAIN);
        } else {
                mutex_lock(&dev_priv->perf.lock);
                ret = i915_perf_read_locked(stream, file, buf, count, ppos);
                mutex_unlock(&dev_priv->perf.lock);
        }

        /* We allow the poll checking to sometimes report false positive EPOLLIN
         * events where we might actually report EAGAIN on read() if there's
         * not really any data available. In this situation though we don't
         * want to enter a busy loop between poll() reporting a EPOLLIN event
         * and read() returning -EAGAIN. Clearing the oa.pollin state here
         * effectively ensures we back off until the next hrtimer callback
         * before reporting another EPOLLIN event.
         */
        if (ret >= 0 || ret == -EAGAIN) {
                /* Maybe make ->pollin per-stream state if we support multiple
                 * concurrent streams in the future.
                 */
                dev_priv->perf.oa.pollin = false;
        }

        return ret;
}

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
        struct drm_i915_private *dev_priv =
                container_of(hrtimer, typeof(*dev_priv),
                             perf.oa.poll_check_timer);

        if (oa_buffer_check_unlocked(dev_priv)) {
                dev_priv->perf.oa.pollin = true;
                wake_up(&dev_priv->perf.oa.poll_wq);
        }

        hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));

        return HRTIMER_RESTART;
}

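/*
 * Aside (minimal model, assuming the standard hrtimer API): the callback
 * above implements a self-rearming poll timer. Setup elsewhere pairs with it
 * roughly as:
 *
 *      hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *      timer.function = oa_poll_check_timer_cb;
 *      hrtimer_start(&timer, ns_to_ktime(POLL_PERIOD),
 *                    HRTIMER_MODE_REL_PINNED);
 *
 * and the hrtimer_forward_now() + HRTIMER_RESTART combination keeps it
 * firing every POLL_PERIOD nanoseconds until hrtimer_cancel() is called in
 * i915_oa_stream_disable().
 */
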
/**
 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
 * @dev_priv: i915 device instance
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this calls through to
 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
 * will be woken for new stream data.
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
                                      struct i915_perf_stream *stream,
                                      struct file *file,
                                      poll_table *wait)
{
        __poll_t events = 0;

        stream->ops->poll_wait(stream, file, wait);

        /* Note: we don't explicitly check whether there's something to read
         * here since this path may be very hot depending on what else
         * userspace is polling, or on the timeout in use. We rely solely on
         * the hrtimer/oa_poll_check_timer_cb to notify us when there are
         * samples to read.
         */
        if (dev_priv->perf.oa.pollin)
                events |= EPOLLIN;

        return events;
}

/**
 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this ensures
 * poll_wait() gets called with a wait queue that will be woken for new stream
 * data.
 *
 * Note: Implementation deferred to i915_perf_poll_locked()
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
        struct i915_perf_stream *stream = file->private_data;
        struct drm_i915_private *dev_priv = stream->dev_priv;
        __poll_t ret;

        mutex_lock(&dev_priv->perf.lock);
        ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
        mutex_unlock(&dev_priv->perf.lock);

        return ret;
}

/**
 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
 * @stream: A disabled i915 perf stream
 *
 * [Re]enables the associated capture of data for this stream.
 *
 * If a stream was previously enabled then there's currently no intention
 * to provide userspace any guarantee about the preservation of previously
 * buffered data.
 */
static void i915_perf_enable_locked(struct i915_perf_stream *stream)
{
        if (stream->enabled)
                return;

        /* Allow stream->ops->enable() to refer to this */
        stream->enabled = true;

        if (stream->ops->enable)
                stream->ops->enable(stream);
}

/**
 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
 * @stream: An enabled i915 perf stream
 *
 * Disables the associated capture of data for this stream.
 *
 * The intention is that disabling and re-enabling a stream will ideally be
 * cheaper than destroying and re-opening a stream with the same configuration,
 * though there are no formal guarantees about what state or buffered data
 * must be retained between disabling and re-enabling a stream.
 *
 * Note: while a stream is disabled it's considered an error for userspace
 * to attempt to read from the stream (-EIO).
 */
static void i915_perf_disable_locked(struct i915_perf_stream *stream)
{
        if (!stream->enabled)
                return;

        /* Allow stream->ops->disable() to refer to this */
        stream->enabled = false;

        if (stream->ops->disable)
                stream->ops->disable(stream);
}

/**
 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
 * @stream: An i915 perf stream
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
                                   unsigned int cmd,
                                   unsigned long arg)
{
        switch (cmd) {
        case I915_PERF_IOCTL_ENABLE:
                i915_perf_enable_locked(stream);
                return 0;
        case I915_PERF_IOCTL_DISABLE:
                i915_perf_disable_locked(stream);
                return 0;
        }

        return -EINVAL;
}

/**
 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
 * @file: An i915 perf stream file
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Implementation deferred to i915_perf_ioctl_locked().
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl(struct file *file,
                            unsigned int cmd,
                            unsigned long arg)
{
        struct i915_perf_stream *stream = file->private_data;
        struct drm_i915_private *dev_priv = stream->dev_priv;
        long ret;

        mutex_lock(&dev_priv->perf.lock);
        ret = i915_perf_ioctl_locked(stream, cmd, arg);
        mutex_unlock(&dev_priv->perf.lock);

        return ret;
}

/**
 * i915_perf_destroy_locked - destroy an i915 perf stream
 * @stream: An i915 perf stream
 *
 * Frees all resources associated with the given i915 perf @stream, disabling
 * any associated data capture in the process.
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 */
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
{
        if (stream->enabled)
                i915_perf_disable_locked(stream);

        if (stream->ops->destroy)
                stream->ops->destroy(stream);

        list_del(&stream->link);

        if (stream->ctx)
                i915_gem_context_put(stream->ctx);

        kfree(stream);
}

/**
 * i915_perf_release - handles userspace close() of a stream file
 * @inode: anonymous inode associated with file
 * @file: An i915 perf stream file
 *
 * Cleans up any resources associated with an open i915 perf stream file.
 *
 * NB: close() can't really fail from the userspace point of view.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_perf_release(struct inode *inode, struct file *file)
{
        struct i915_perf_stream *stream = file->private_data;
        struct drm_i915_private *dev_priv = stream->dev_priv;

        mutex_lock(&dev_priv->perf.lock);
        i915_perf_destroy_locked(stream);
        mutex_unlock(&dev_priv->perf.lock);

        return 0;
}

static const struct file_operations fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .release        = i915_perf_release,
        .poll           = i915_perf_poll,
        .read           = i915_perf_read,
        .unlocked_ioctl = i915_perf_ioctl,
        /* Our ioctls have no arguments, so it's safe to use the same function
         * to handle 32-bit compatibility.
         */
        .compat_ioctl   = i915_perf_ioctl,
};

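/*
 * Illustrative userspace sketch (not driver code; error handling elided,
 * and the metrics-set id and exponent below are placeholder values): one
 * plausible way to drive the file operations above via the uapi in
 * include/uapi/drm/i915_drm.h.
 *
 *      __u64 props[] = {
 *              DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *              DRM_I915_PERF_PROP_OA_METRICS_SET, 1,   // placeholder id
 *              DRM_I915_PERF_PROP_OA_FORMAT,
 *                      I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *              DRM_I915_PERF_PROP_OA_EXPONENT, 16,     // placeholder period
 *      };
 *      struct drm_i915_perf_open_param param = {
 *              .flags = I915_PERF_FLAG_FD_CLOEXEC,
 *              .num_properties = (sizeof(props) / sizeof(props[0])) / 2,
 *              .properties_ptr = (__u64)(uintptr_t)props,
 *      };
 *      int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *      char buf[4096];
 *
 *      ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *      while (read(stream_fd, buf, sizeof(buf)) > 0)
 *              ;       // parse drm_i915_perf_record_header records in buf
 */
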
/**
 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
 * @dev_priv: i915 device instance
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: individually validated u64 property value pairs
 * @file: drm file
 *
 * See i915_perf_ioctl_open() for interface details.
 *
 * Implements further stream config validation and stream initialization on
 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
 * taken to serialize with any non-file-operation driver hooks.
 *
 * Note: at this point the @props have only been validated in isolation and
 * it's still necessary to validate that the combination of properties makes
 * sense.
 *
 * In the case where userspace is interested in OA unit metrics then further
 * config validation and stream initialization details will be handled by
 * i915_oa_stream_init(). The code here should only validate config state that
 * will be relevant to all stream types / backends.
 *
 * Returns: zero on success or a negative error code.
 */
static int
i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
                            struct drm_i915_perf_open_param *param,
                            struct perf_open_properties *props,
                            struct drm_file *file)
{
        struct i915_gem_context *specific_ctx = NULL;
        struct i915_perf_stream *stream = NULL;
        unsigned long f_flags = 0;
        bool privileged_op = true;
        int stream_fd;
        int ret;

        if (props->single_context) {
                u32 ctx_handle = props->ctx_handle;
                struct drm_i915_file_private *file_priv = file->driver_priv;

                specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
                if (!specific_ctx) {
                        DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
                                  ctx_handle);
                        ret = -ENOENT;
                        goto err;
                }
        }

        /*
         * On Haswell the OA unit supports clock gating off for a specific
         * context and in this mode there's no visibility of metrics for the
         * rest of the system, which we consider acceptable for a
         * non-privileged client.
         *
         * For Gen8+ the OA unit no longer supports clock gating off for a
         * specific context and the kernel can't securely stop the counters
         * from updating as system-wide / global values. Even though we can
         * filter reports based on the included context ID we can't block
         * clients from seeing the raw / global counter values via
         * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
         * enable the OA unit by default.
         */
        if (IS_HASWELL(dev_priv) && specific_ctx)
                privileged_op = false;

        /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
         * we check a dev.i915.perf_stream_paranoid sysctl option
         * to determine if it's ok to access system wide OA counters
         * without CAP_SYS_ADMIN privileges.
         */
        if (privileged_op &&
            i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
                DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
                ret = -EACCES;
                goto err_ctx;
        }

        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (!stream) {
                ret = -ENOMEM;
                goto err_ctx;
        }

        stream->dev_priv = dev_priv;
        stream->ctx = specific_ctx;

        ret = i915_oa_stream_init(stream, param, props);
        if (ret)
                goto err_alloc;

        /* we avoid simply assigning stream->sample_flags = props->sample_flags
         * to have _stream_init check the combination of sample flags more
         * thoroughly, but still this is the expected result at this point.
         */
        if (WARN_ON(stream->sample_flags != props->sample_flags)) {
                ret = -ENODEV;
                goto err_flags;
        }

        list_add(&stream->link, &dev_priv->perf.streams);

        if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
                f_flags |= O_CLOEXEC;
        if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
                f_flags |= O_NONBLOCK;

        stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
        if (stream_fd < 0) {
                ret = stream_fd;
                goto err_open;
        }

        if (!(param->flags & I915_PERF_FLAG_DISABLED))
                i915_perf_enable_locked(stream);

        return stream_fd;

err_open:
        list_del(&stream->link);
err_flags:
        if (stream->ops->destroy)
                stream->ops->destroy(stream);
err_alloc:
        kfree(stream);
err_ctx:
        if (specific_ctx)
                i915_gem_context_put(specific_ctx);
err:
        return ret;
}

static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
        return div64_u64(1000000000ULL * (2ULL << exponent),
                         1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
}

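/*
 * Editor's note (illustrative arithmetic, not driver code): the sampling
 * period is (2 << exponent) timestamp ticks converted to nanoseconds.
 * Assuming a 12.5MHz command streamer timestamp clock (i.e.
 * cs_timestamp_frequency_khz == 12500, as on Haswell), exponent 0 gives:
 *
 *      (2 << 0) * 1000000000 / 12500000 = 160ns
 *
 * which matches the minimum HSW period quoted in the OA_EXPONENT comments
 * below.
 */
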
/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @dev_priv: i915 device instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties so
 * we shouldn't validate or assume anything about ordering here. This doesn't
 * rule out defining new properties with ordering requirements in the future.
 */
static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                                    u64 __user *uprops,
                                    u32 n_props,
                                    struct perf_open_properties *props)
{
        u64 __user *uprop = uprops;
        u32 i;

        memset(props, 0, sizeof(struct perf_open_properties));

        if (!n_props) {
                DRM_DEBUG("No i915 perf properties given\n");
                return -EINVAL;
        }

        /* Considering that ID = 0 is reserved and assuming that we don't
         * (currently) expect any configurations to ever specify duplicate
         * values for a particular property ID then the last _PROP_MAX value is
         * one greater than the maximum number of properties we expect to get
         * from userspace.
         */
        if (n_props >= DRM_I915_PERF_PROP_MAX) {
                DRM_DEBUG("More i915 perf properties specified than exist\n");
                return -EINVAL;
        }

        for (i = 0; i < n_props; i++) {
                u64 oa_period, oa_freq_hz;
                u64 id, value;
                int ret;

                ret = get_user(id, uprop);
                if (ret)
                        return ret;

                ret = get_user(value, uprop + 1);
                if (ret)
                        return ret;

                if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
                        DRM_DEBUG("Unknown i915 perf property ID\n");
                        return -EINVAL;
                }

                switch ((enum drm_i915_perf_property_id)id) {
                case DRM_I915_PERF_PROP_CTX_HANDLE:
                        props->single_context = 1;
                        props->ctx_handle = value;
                        break;
                case DRM_I915_PERF_PROP_SAMPLE_OA:
                        if (value)
                                props->sample_flags |= SAMPLE_OA_REPORT;
                        break;
                case DRM_I915_PERF_PROP_OA_METRICS_SET:
                        if (value == 0) {
                                DRM_DEBUG("Unknown OA metric set ID\n");
                                return -EINVAL;
                        }
                        props->metrics_set = value;
                        break;
                case DRM_I915_PERF_PROP_OA_FORMAT:
                        if (value == 0 || value >= I915_OA_FORMAT_MAX) {
                                DRM_DEBUG("Out-of-range OA report format %llu\n",
                                          value);
                                return -EINVAL;
                        }
                        if (!dev_priv->perf.oa.oa_formats[value].size) {
                                DRM_DEBUG("Unsupported OA report format %llu\n",
                                          value);
                                return -EINVAL;
                        }
                        props->oa_format = value;
                        break;
                case DRM_I915_PERF_PROP_OA_EXPONENT:
                        if (value > OA_EXPONENT_MAX) {
                                DRM_DEBUG("OA timer exponent too high (> %u)\n",
                                          OA_EXPONENT_MAX);
                                return -EINVAL;
                        }

                        /* Theoretically we can program the OA unit to sample
                         * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
                         * for BXT. We don't allow such high sampling
                         * frequencies by default unless root.
                         */

                        BUILD_BUG_ON(sizeof(oa_period) != 8);
                        oa_period = oa_exponent_to_ns(dev_priv, value);

                        /* This check is primarily to ensure that oa_period <=
                         * UINT32_MAX (before passing to do_div which only
                         * accepts a u32 denominator), but we can also skip
                         * checking anything < 1Hz which implicitly can't be
                         * limited via an integer oa_max_sample_rate.
                         */
                        if (oa_period <= NSEC_PER_SEC) {
                                u64 tmp = NSEC_PER_SEC;
                                do_div(tmp, oa_period);
                                oa_freq_hz = tmp;
                        } else
                                oa_freq_hz = 0;

                        if (oa_freq_hz > i915_oa_max_sample_rate &&
                            !capable(CAP_SYS_ADMIN)) {
                                DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
                                          i915_oa_max_sample_rate);
                                return -EACCES;
                        }

                        props->oa_periodic = true;
                        props->oa_period_exponent = value;
                        break;
                case DRM_I915_PERF_PROP_MAX:
                        MISSING_CASE(id);
                        return -EINVAL;
                }

                uprop += 2;
        }

        return 0;
}

/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 key, value pair properties.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_sem.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Return: A newly opened i915 Perf stream file descriptor or negative
 * error code on failure.
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_perf_open_param *param = data;
        struct perf_open_properties props;
        u32 known_open_flags;
        int ret;

        if (!dev_priv->perf.initialized) {
                DRM_DEBUG("i915 perf interface not available for this system\n");
                return -ENOTSUPP;
        }

        known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
                           I915_PERF_FLAG_FD_NONBLOCK |
                           I915_PERF_FLAG_DISABLED;
        if (param->flags & ~known_open_flags) {
                DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
                return -EINVAL;
        }

        ret = read_properties_unlocked(dev_priv,
                                       u64_to_user_ptr(param->properties_ptr),
                                       param->num_properties,
                                       &props);
        if (ret)
                return ret;

        mutex_lock(&dev_priv->perf.lock);
        ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
        mutex_unlock(&dev_priv->perf.lock);

        return ret;
}

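/*
 * Editor's sketch (not part of the driver): a hypothetical userspace client
 * opens a periodic OA stream by passing a flat array of u64 (property ID,
 * value) pairs; config_id here is a placeholder for a metric set ID read
 * from sysfs:
 *
 *      uint64_t properties[] = {
 *              DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *              DRM_I915_PERF_PROP_OA_METRICS_SET, config_id,
 *              DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *              DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *      };
 *      struct drm_i915_perf_open_param param = {
 *              .flags = I915_PERF_FLAG_FD_CLOEXEC,
 *              .num_properties = sizeof(properties) / 16,  // pairs of u64
 *              .properties_ptr = (uintptr_t)properties,
 *      };
 *      int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */
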
/**
 * i915_perf_register - exposes i915-perf to userspace
 * @dev_priv: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *dev_priv)
{
        int ret;

        if (!dev_priv->perf.initialized)
                return;

        /* To be sure we're synchronized with an attempted
         * i915_perf_open_ioctl(); considering that we register after
         * being exposed to userspace.
         */
        mutex_lock(&dev_priv->perf.lock);

        dev_priv->perf.metrics_kobj =
                kobject_create_and_add("metrics",
                                       &dev_priv->drm.primary->kdev->kobj);
        if (!dev_priv->perf.metrics_kobj)
                goto exit;

        sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);

        if (IS_HASWELL(dev_priv)) {
                i915_perf_load_test_config_hsw(dev_priv);
        } else if (IS_BROADWELL(dev_priv)) {
                i915_perf_load_test_config_bdw(dev_priv);
        } else if (IS_CHERRYVIEW(dev_priv)) {
                i915_perf_load_test_config_chv(dev_priv);
        } else if (IS_SKYLAKE(dev_priv)) {
                if (IS_SKL_GT2(dev_priv))
                        i915_perf_load_test_config_sklgt2(dev_priv);
                else if (IS_SKL_GT3(dev_priv))
                        i915_perf_load_test_config_sklgt3(dev_priv);
                else if (IS_SKL_GT4(dev_priv))
                        i915_perf_load_test_config_sklgt4(dev_priv);
        } else if (IS_BROXTON(dev_priv)) {
                i915_perf_load_test_config_bxt(dev_priv);
        } else if (IS_KABYLAKE(dev_priv)) {
                if (IS_KBL_GT2(dev_priv))
                        i915_perf_load_test_config_kblgt2(dev_priv);
                else if (IS_KBL_GT3(dev_priv))
                        i915_perf_load_test_config_kblgt3(dev_priv);
        } else if (IS_GEMINILAKE(dev_priv)) {
                i915_perf_load_test_config_glk(dev_priv);
        } else if (IS_COFFEELAKE(dev_priv)) {
                if (IS_CFL_GT2(dev_priv))
                        i915_perf_load_test_config_cflgt2(dev_priv);
                else if (IS_CFL_GT3(dev_priv))
                        i915_perf_load_test_config_cflgt3(dev_priv);
        } else if (IS_CANNONLAKE(dev_priv)) {
                i915_perf_load_test_config_cnl(dev_priv);
        } else if (IS_ICELAKE(dev_priv)) {
                i915_perf_load_test_config_icl(dev_priv);
        }

        if (dev_priv->perf.oa.test_config.id == 0)
                goto sysfs_error;

        ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
                                 &dev_priv->perf.oa.test_config.sysfs_metric);
        if (ret)
                goto sysfs_error;

        atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);

        goto exit;

sysfs_error:
        kobject_put(dev_priv->perf.metrics_kobj);
        dev_priv->perf.metrics_kobj = NULL;

exit:
        mutex_unlock(&dev_priv->perf.lock);
}

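/*
 * Editor's sketch (not driver code): with the "metrics" kobject registered
 * above, each advertised config appears to userspace roughly as:
 *
 *      /sys/class/drm/card0/metrics/<uuid>/id
 *
 * where reading "id" yields the value to pass as
 * DRM_I915_PERF_PROP_OA_METRICS_SET when opening a stream. "card0" is
 * illustrative; the kobject is parented to this device's primary drm node.
 */
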
/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @dev_priv: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->perf.metrics_kobj)
                return;

        sysfs_remove_group(dev_priv->perf.metrics_kobj,
                           &dev_priv->perf.oa.test_config.sysfs_metric);

        kobject_put(dev_priv->perf.metrics_kobj);
        dev_priv->perf.metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        static const i915_reg_t flex_eu_regs[] = {
                EU_PERF_CNTL0,
                EU_PERF_CNTL1,
                EU_PERF_CNTL2,
                EU_PERF_CNTL3,
                EU_PERF_CNTL4,
                EU_PERF_CNTL5,
                EU_PERF_CNTL6,
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
                if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
                        return true;
        }
        return false;
}

static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
                addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
               (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
                addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
               (addr >= i915_mmio_reg_offset(OACEC0_0) &&
                addr <= i915_mmio_reg_offset(OACEC7_1));
}

static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
               (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
                addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
               (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
                addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
               (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
                addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
}

static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return gen7_is_valid_mux_addr(dev_priv, addr) ||
               addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
               (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
                addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
}

static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return gen8_is_valid_mux_addr(dev_priv, addr) ||
               (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
                addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
}

static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return gen7_is_valid_mux_addr(dev_priv, addr) ||
               (addr >= 0x25100 && addr <= 0x2FF90) ||
               (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
                addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
               addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
}

static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return gen7_is_valid_mux_addr(dev_priv, addr) ||
               (addr >= 0x182300 && addr <= 0x1823A4);
}

static uint32_t mask_reg_value(u32 reg, u32 val)
{
        /* HALF_SLICE_CHICKEN2 is programmed with the
         * WaDisableSTUnitPowerOptimization workaround. Make sure the value
         * programmed by userspace doesn't change this.
         */
        if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
                val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

        /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
         * indicated by its name and a bunch of selection fields used by OA
         * configs.
         */
        if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
                val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

        return val;
}

static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
                                         bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
                                         u32 __user *regs,
                                         u32 n_regs)
{
        struct i915_oa_reg *oa_regs;
        int err;
        u32 i;

        if (!n_regs)
                return NULL;

        if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
                return ERR_PTR(-EFAULT);

        /* No is_valid function means we're not allowing any register to be programmed. */
        GEM_BUG_ON(!is_valid);
        if (!is_valid)
                return ERR_PTR(-EINVAL);

        oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
        if (!oa_regs)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < n_regs; i++) {
                u32 addr, value;

                err = get_user(addr, regs);
                if (err)
                        goto addr_err;

                if (!is_valid(dev_priv, addr)) {
                        DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
                        err = -EINVAL;
                        goto addr_err;
                }

                err = get_user(value, regs + 1);
                if (err)
                        goto addr_err;

                oa_regs[i].addr = _MMIO(addr);
                oa_regs[i].value = mask_reg_value(addr, value);

                regs += 2;
        }

        return oa_regs;

addr_err:
        kfree(oa_regs);
        return ERR_PTR(err);
}

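/*
 * Editor's sketch (not driver code): the userspace buffer parsed above is a
 * flat array of u32 (address, value) pairs, e.g. a hypothetical single-entry
 * mux programming:
 *
 *      uint32_t mux_regs[] = { 0x9888, 0x15050000 };   // addr, value
 *
 * Both the offset and value shown are purely illustrative; only addresses
 * accepted by the per-platform is_valid() callback survive validation, and
 * values may be masked by mask_reg_value().
 */
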
static ssize_t show_dynamic_id(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct i915_oa_config *oa_config =
                container_of(attr, typeof(*oa_config), sysfs_metric_id);

        return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
                                         struct i915_oa_config *oa_config)
{
        sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
        oa_config->sysfs_metric_id.attr.name = "id";
        oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
        oa_config->sysfs_metric_id.show = show_dynamic_id;
        oa_config->sysfs_metric_id.store = NULL;
        oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
        oa_config->attrs[1] = NULL;

        oa_config->sysfs_metric.name = oa_config->uuid;
        oa_config->sysfs_metric.attrs = oa_config->attrs;

        return sysfs_create_group(dev_priv->perf.metrics_kobj,
                                  &oa_config->sysfs_metric);
}

/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open
 * ioctl or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_perf_oa_config *args = data;
        struct i915_oa_config *oa_config, *tmp;
        int err, id;

        if (!dev_priv->perf.initialized) {
                DRM_DEBUG("i915 perf interface not available for this system\n");
                return -ENOTSUPP;
        }

        if (!dev_priv->perf.metrics_kobj) {
                DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
                return -EINVAL;
        }

        if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
                DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
                return -EACCES;
        }

        if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
            (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
            (!args->flex_regs_ptr || !args->n_flex_regs)) {
                DRM_DEBUG("No OA registers given\n");
                return -EINVAL;
        }

        oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
        if (!oa_config) {
                DRM_DEBUG("Failed to allocate memory for the OA config\n");
                return -ENOMEM;
        }

        atomic_set(&oa_config->ref_count, 1);

        if (!uuid_is_valid(args->uuid)) {
                DRM_DEBUG("Invalid uuid format for OA config\n");
                err = -EINVAL;
                goto reg_err;
        }

        /* Last character in oa_config->uuid will be 0 because oa_config
         * was allocated with kzalloc().
         */
        memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

        oa_config->mux_regs_len = args->n_mux_regs;
        oa_config->mux_regs =
                alloc_oa_regs(dev_priv,
                              dev_priv->perf.oa.ops.is_valid_mux_reg,
                              u64_to_user_ptr(args->mux_regs_ptr),
                              args->n_mux_regs);

        if (IS_ERR(oa_config->mux_regs)) {
                DRM_DEBUG("Failed to create OA config for mux_regs\n");
                err = PTR_ERR(oa_config->mux_regs);
                goto reg_err;
        }

        oa_config->b_counter_regs_len = args->n_boolean_regs;
        oa_config->b_counter_regs =
                alloc_oa_regs(dev_priv,
                              dev_priv->perf.oa.ops.is_valid_b_counter_reg,
                              u64_to_user_ptr(args->boolean_regs_ptr),
                              args->n_boolean_regs);

        if (IS_ERR(oa_config->b_counter_regs)) {
                DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
                err = PTR_ERR(oa_config->b_counter_regs);
                goto reg_err;
        }

        if (INTEL_GEN(dev_priv) < 8) {
                if (args->n_flex_regs != 0) {
                        err = -EINVAL;
                        goto reg_err;
                }
        } else {
                oa_config->flex_regs_len = args->n_flex_regs;
                oa_config->flex_regs =
                        alloc_oa_regs(dev_priv,
                                      dev_priv->perf.oa.ops.is_valid_flex_reg,
                                      u64_to_user_ptr(args->flex_regs_ptr),
                                      args->n_flex_regs);

                if (IS_ERR(oa_config->flex_regs)) {
                        DRM_DEBUG("Failed to create OA config for flex_regs\n");
                        err = PTR_ERR(oa_config->flex_regs);
                        goto reg_err;
                }
        }

        err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
        if (err)
                goto reg_err;

        /* We shouldn't have too many configs, so this iteration shouldn't be
         * too costly.
         */
        idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
                if (!strcmp(tmp->uuid, oa_config->uuid)) {
                        DRM_DEBUG("OA config already exists with this uuid\n");
                        err = -EADDRINUSE;
                        goto sysfs_err;
                }
        }

        err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
        if (err) {
                DRM_DEBUG("Failed to create sysfs entry for OA config\n");
                goto sysfs_err;
        }

        /* Config id 0 is invalid, id 1 is reserved for the kernel's built-in
         * test config.
         */
        oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
                                  oa_config, 2,
                                  0, GFP_KERNEL);
        if (oa_config->id < 0) {
                DRM_DEBUG("Failed to allocate an ID for the OA config\n");
                err = oa_config->id;
                goto sysfs_err;
        }

        mutex_unlock(&dev_priv->perf.metrics_lock);

        DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);

        return oa_config->id;

sysfs_err:
        mutex_unlock(&dev_priv->perf.metrics_lock);
reg_err:
        put_oa_config(dev_priv, oa_config);
        DRM_DEBUG("Failed to add new OA config\n");
        return err;
}

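/*
 * Editor's sketch (not part of the driver): a hypothetical userspace tool
 * would register a config roughly as follows, with the uuid string and
 * register arrays as placeholders; n_mux_regs counts (addr, value) pairs:
 *
 *      struct drm_i915_perf_oa_config config = {
 *              .n_mux_regs = sizeof(mux_regs) / sizeof(mux_regs[0]) / 2,
 *              .mux_regs_ptr = (uintptr_t)mux_regs,
 *      };
 *      memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
 *             sizeof(config.uuid));
 *      int config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *
 * The returned ID is what DRM_I915_PERF_PROP_OA_METRICS_SET expects.
 */
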
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 *arg = data;
        struct i915_oa_config *oa_config;
        int ret;

        if (!dev_priv->perf.initialized) {
                DRM_DEBUG("i915 perf interface not available for this system\n");
                return -ENOTSUPP;
        }

        if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
                DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
                return -EACCES;
        }

        ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
        if (ret)
                goto lock_err;

        oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
        if (!oa_config) {
                DRM_DEBUG("Failed to remove unknown OA config\n");
                ret = -ENOENT;
                goto config_err;
        }

        GEM_BUG_ON(*arg != oa_config->id);

        sysfs_remove_group(dev_priv->perf.metrics_kobj,
                           &oa_config->sysfs_metric);

        idr_remove(&dev_priv->perf.metrics_idr, *arg);

        DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

        put_oa_config(dev_priv, oa_config);

config_err:
        mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
        return ret;
}

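/*
 * Editor's sketch (not part of the driver): removal takes the config's u64
 * ID directly as the ioctl payload, e.g.:
 *
 *      uint64_t id = config_id;
 *      ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 */
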
static struct ctl_table oa_table[] = {
        {
                .procname = "perf_stream_paranoid",
                .data = &i915_perf_stream_paranoid,
                .maxlen = sizeof(i915_perf_stream_paranoid),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
                .extra2 = &one,
        },
        {
                .procname = "oa_max_sample_rate",
                .data = &i915_oa_max_sample_rate,
                .maxlen = sizeof(i915_oa_max_sample_rate),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
                .extra2 = &oa_sample_rate_hard_limit,
        },
        {}
};

static struct ctl_table i915_root[] = {
        {
                .procname = "i915",
                .maxlen = 0,
                .mode = 0555,
                .child = oa_table,
        },
        {}
};

static struct ctl_table dev_root[] = {
        {
                .procname = "dev",
                .maxlen = 0,
                .mode = 0555,
                .child = i915_root,
        },
        {}
};

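/*
 * Editor's note (not driver code): the tables above surface two knobs under
 * /proc/sys, tunable e.g. from a root shell:
 *
 *      sysctl dev.i915.perf_stream_paranoid=0
 *      sysctl dev.i915.oa_max_sample_rate=100000
 *
 * The values shown are illustrative; perf_stream_paranoid is clamped to 0/1
 * and oa_max_sample_rate to the runtime-computed oa_sample_rate_hard_limit.
 */
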
/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase, with i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
        if (IS_HASWELL(dev_priv)) {
                dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                        gen7_is_valid_b_counter_addr;
                dev_priv->perf.oa.ops.is_valid_mux_reg =
                        hsw_is_valid_mux_addr;
                dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
                dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
                dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
                dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
                dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
                dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
                dev_priv->perf.oa.ops.read = gen7_oa_read;
                dev_priv->perf.oa.ops.oa_hw_tail_read =
                        gen7_oa_hw_tail_read;

                dev_priv->perf.oa.oa_formats = hsw_oa_formats;
        } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
                /* Note that although we could theoretically also support the
                 * legacy ringbuffer mode on BDW (and earlier iterations of
                 * this driver, before upstreaming did this) it didn't seem
                 * worth the complexity to maintain now that BDW+ enable
                 * execlist mode by default.
                 */
                dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

                dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
                dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
                dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
                dev_priv->perf.oa.ops.read = gen8_oa_read;
                dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

                if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
                        dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                                gen7_is_valid_b_counter_addr;
                        dev_priv->perf.oa.ops.is_valid_mux_reg =
                                gen8_is_valid_mux_addr;
                        dev_priv->perf.oa.ops.is_valid_flex_reg =
                                gen8_is_valid_flex_addr;

                        if (IS_CHERRYVIEW(dev_priv)) {
                                dev_priv->perf.oa.ops.is_valid_mux_reg =
                                        chv_is_valid_mux_addr;
                        }

                        dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
                        dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;

                        if (IS_GEN8(dev_priv)) {
                                dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
                                dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

                                dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
                        } else {
                                dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
                                dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

                                dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
                        }
                } else if (IS_GEN(dev_priv, 10, 11)) {
                        dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                                gen7_is_valid_b_counter_addr;
                        dev_priv->perf.oa.ops.is_valid_mux_reg =
                                gen10_is_valid_mux_addr;
                        dev_priv->perf.oa.ops.is_valid_flex_reg =
                                gen8_is_valid_flex_addr;

                        dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
                        dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;

                        dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
                        dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

                        dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
                }
        }

        if (dev_priv->perf.oa.ops.enable_metric_set) {
                hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
                init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

                INIT_LIST_HEAD(&dev_priv->perf.streams);
                mutex_init(&dev_priv->perf.lock);
                spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

                oa_sample_rate_hard_limit = 1000 *
                        (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
                dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

                mutex_init(&dev_priv->perf.metrics_lock);
                idr_init(&dev_priv->perf.metrics_idr);

                dev_priv->perf.initialized = true;
        }
}

static int destroy_config(int id, void *p, void *data)
{
        struct drm_i915_private *dev_priv = data;
        struct i915_oa_config *oa_config = p;

        put_oa_config(dev_priv, oa_config);

        return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->perf.initialized)
                return;

        idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
        idr_destroy(&dev_priv->perf.metrics_idr);

        unregister_sysctl_table(dev_priv->perf.sysctl_header);

        memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

        dev_priv->perf.initialized = false;
}