/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */

/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */

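/* To make the flow concrete, here is a minimal, illustrative userspace sketch
 * of opening a stream. It is not part of the driver; error handling is elided
 * and the metrics set ID, OA format and exponent values are arbitrary
 * assumptions. The uapi structs, property IDs and DRM_IOCTL_I915_PERF_OPEN
 * all come from include/uapi/drm/i915_drm.h:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd is then read() for drm_i915_perf_record_header framed
 * records; see the parsing sketch after append_oa_sample() below.
 */
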
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when normalizing. It would be inconvenient to split counters up into
 *   separate events, only to require userspace to recombine them. For Mesa
 *   it's also convenient to be forwarded raw, periodic reports for combining
 *   with the side-band raw reports it captures using MI_REPORT_PERF_COUNT
 *   commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 * - Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 * - The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently generally designed assuming the largest 16M size is
 * used such that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

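/* A worked example of the macro above: because OA_BUFFER_SIZE is a power of
 * two, masking with (OA_BUFFER_SIZE - 1) is a cheap modulo that also handles
 * tail wrap-around. With the 16M buffer, a head of 16777152 (64 bytes before
 * the end) and a wrapped tail of 192 give OA_TAKEN(192, 16777152) == 256,
 * i.e. four 64-byte reports are available even though tail < head
 * numerically.
 */
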
/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering POLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */

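/* An illustrative timeline for the aging scheme described above (the exact
 * cadence depends on the hrtimer period and OA_TAIL_MARGIN_NSEC; these
 * timings are assumptions for the sake of the example):
 *
 *	t+0ms:	hrtimer reads hw_tail; tails[!aged_idx] starts aging
 *	t+5ms:	hrtimer fires again; the aging tail is now older than
 *		OA_TAIL_MARGIN_NSEC so aged_idx flips, that tail becomes
 *		readable and the other slot is reset to INVALID_TAIL_PTR,
 *		ready to start aging a newer hw_tail
 *	read():	only consumes reports between head and the aged tail
 */
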
#define OA_TAIL_MARGIN_NSEC	100000ULL

#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31

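/* The period for a given exponent follows from one of the 64 timestamp bits
 * being selected as the report trigger: a report fires each time that bit
 * toggles, i.e. every 2^(exponent + 1) timestamp ticks. As a worked example
 * (assuming Haswell's 12.5MHz / 80ns timestamp period):
 *
 *	period_ns = (2 << exponent) * 80
 *
 * so exponent 0 samples every 160ns (see the comment below on the hard
 * limit) and OA_EXPONENT_MAX (31) roughly every 5.7 minutes, comfortably
 * within what a 32bit report timestamp can disambiguate.
 */
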
#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

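/* Note the sizes above are all powers of two that factor evenly into the 16M
 * buffer, so reports never straddle the wrap-around: e.g. 16M / 256 == 65536
 * whole reports, whereas a 192 byte report neither divides 16M evenly nor
 * yields a usable ~(size - 1) alignment mask.
 */
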
static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};

static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	/* The register lists may hold ERR_PTR() values if the config is
	 * being torn down on an allocation error path, so only kfree()
	 * real pointers (kfree(NULL) is harmless).
	 */
	if (!IS_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!IS_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!IS_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.oa.test_config;
		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}

static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and
 * determine if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset) if there is currently
	 * a read() in progress.
	 */
	head = dev_priv->perf.oa.oa_buffer.head;

	aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
		false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

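/* For reference, a minimal (illustrative, untested) userspace sketch of how
 * the records appended above are consumed; every record starts with a struct
 * drm_i915_perf_record_header as defined in include/uapi/drm/i915_drm.h:
 *
 *	uint8_t data[4096];
 *	ssize_t len = read(stream_fd, data, sizeof(data));
 *	ssize_t pos = 0;
 *
 *	while (pos < len) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(data + pos);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((const uint8_t *)(header + 1));
 *
 *		pos += header->size;
 *	}
 *
 * where process_oa_report() is a hypothetical handler for the HW specific
 * report format requested at open time. DRM_I915_PERF_RECORD_OA_REPORT_LOST
 * and DRM_I915_PERF_RECORD_OA_BUFFER_LOST status records carry no payload
 * beyond the header.
 */
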
/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;

	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		/*
		 * XXX: Just keep the lower 21 bits for now since I'm not
		 * entirely sure if the HW touches any of the higher bits in
		 * this field
		 */
		ctx_id = report32[2] & 0x1fffff;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
		     dev_priv->perf.oa.specific_ctx_id) ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.oa.exclusive_stream->ctx &&
			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
  840. static int gen7_append_oa_reports(struct i915_perf_stream *stream,
  841. char __user *buf,
  842. size_t count,
  843. size_t *offset)
  844. {
  845. struct drm_i915_private *dev_priv = stream->dev_priv;
  846. int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;

	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN7_OASTATUS2,
			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
			    OA_MEM_SELECT_GGTT));
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}
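
/*
 * A worked example of the masked pointer arithmetic above (assuming the
 * 16MB OA buffer used here and, say, 256 byte reports): OA_TAKEN()
 * subtracts head from tail modulo the power-of-two buffer size, so with
 * head = OA_BUFFER_SIZE - 256 and tail = 256 it returns 512, i.e. the two
 * reports that wrapped around the end of the buffer. The loop's
 * (head + report_size) & mask increment wraps the head pointer in the
 * same way.
 */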
/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.oa.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}
/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!dev_priv->perf.oa.periodic)
		return -EIO;

	return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
					oa_buffer_check_unlocked(dev_priv));
}
/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}
/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}
/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (i915.enable_execlists)
		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
	else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];
		struct intel_ring *ring;
		int ret;

		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 *
		 * NB: implied RCS engine...
		 */
		ring = engine->context_pin(engine, stream->ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (IS_ERR(ring))
			return PTR_ERR(ring);

		/*
		 * Explicitly track the ID (instead of calling
		 * i915_ggtt_offset() on the fly) considering the difference
		 * with gen8+ and execlists
		 */
		dev_priv->perf.oa.specific_ctx_id =
			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
	}

	return 0;
}
/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * If anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, it can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (i915.enable_execlists) {
		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];

		mutex_lock(&dev_priv->drm.struct_mutex);

		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
		engine->context_unpin(engine, stream->ctx);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}
}
static void
free_oa_buffer(struct drm_i915_private *i915)
{
	mutex_lock(&i915->drm.struct_mutex);

	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

	i915->perf.oa.oa_buffer.vma = NULL;
	i915->perf.oa.oa_buffer.vaddr = NULL;

	mutex_unlock(&i915->drm.struct_mutex);
}
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	dev_priv->perf.oa.exclusive_stream = NULL;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);

	free_oa_buffer(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	put_oa_config(dev_priv, stream->oa_config);

	if (dev_priv->perf.oa.spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 dev_priv->perf.oa.spurious_report_rs.missed);
	}
}
static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN7_OABUFFER, gtt_offset);

	I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/* Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}
static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	I915_WRITE(GEN8_OASTATUS, 0);
	I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	I915_WRITE(GEN8_OABUFFER, gtt_offset |
		   OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
	I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/*
	 * Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}
static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		return ret;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		DRM_ERROR("Failed to allocate OA buffer\n");
		ret = PTR_ERR(bo);
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}
	dev_priv->perf.oa.oa_buffer.vma = vma;

	dev_priv->perf.oa.oa_buffer.vaddr =
		i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
		ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
		goto err_unpin;
	}

	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);

	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
			 dev_priv->perf.oa.oa_buffer.vaddr);

	goto unlock;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	dev_priv->perf.oa.oa_buffer.vaddr = NULL;
	dev_priv->perf.oa.oa_buffer.vma = NULL;

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}
static void config_oa_regs(struct drm_i915_private *dev_priv,
			   const struct i915_oa_reg *regs,
			   u32 n_regs)
{
	u32 i;

	for (i = 0; i < n_regs; i++) {
		const struct i915_oa_reg *reg = regs + i;

		I915_WRITE(reg->addr, reg->value);
	}
}
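
/*
 * For illustration (a hypothetical, abridged register list; the real
 * tables are generated per-platform and per-metric-set, e.g. in
 * i915_oa_hsw.c, and are typically much longer):
 *
 *	static const struct i915_oa_reg mux_config_example[] = {
 *		{ _MMIO(0x9888), 0x104f00e0 },
 *		{ _MMIO(0x9888), 0x124f1c00 },
 *	};
 *
 *	config_oa_regs(dev_priv, mux_config_example,
 *		       ARRAY_SIZE(mux_config_example));
 */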
static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
				 const struct i915_oa_config *oa_config)
{
	/* PRM:
	 *
	 * OA unit is using “crclk” for its functionality. When trunk
	 * level clock gating takes place, OA clock would be gated,
	 * unable to count the events from non-render clock domain.
	 * Render clock gating must be disabled when OA is enabled to
	 * count the events from non-render domain. Unit level clock
	 * gating for RCS should also be disabled.
	 */
	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
				    ~GEN7_DOP_CLOCK_GATE_ENABLE));
	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));

	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

	/* It apparently takes a fairly long time for a new MUX
	 * configuration to be applied after these register writes.
	 * This delay duration was derived empirically based on the
	 * render_basic config but hopefully it covers the maximum
	 * configuration latency.
	 *
	 * As a fallback, the checks in _append_oa_reports() to skip
	 * invalid OA reports do also seem to work to discard reports
	 * generated before this config has completed - albeit not
	 * silently.
	 *
	 * Unfortunately this is essentially a magic number, since we
	 * don't currently know of a reliable mechanism for predicting
	 * how long the MUX config will take to apply and besides
	 * seeing invalid reports we don't know of a reliable way to
	 * explicitly check that the MUX config has landed.
	 *
	 * It's even possible we've mischaracterized the underlying
	 * problem - it just seems like the simplest explanation why
	 * a delay at this location would mitigate any invalid reports.
	 */
	usleep_range(15000, 20000);

	config_oa_regs(dev_priv, oa_config->b_counter_regs,
		       oa_config->b_counter_regs_len);

	return 0;
}
static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
				  ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
				    GEN7_DOP_CLOCK_GATE_ENABLE));

	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
				      ~GT_NOA_ENABLE));
}
/*
 * NB: It must always remain pointer safe to run this even if the OA unit
 * has been disabled.
 *
 * It's fine to put out-of-date values into these per-context registers
 * in the case that the OA unit has been disabled.
 */
static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
					   u32 *reg_state,
					   const struct i915_oa_config *oa_config)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};
	int i;

	reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
	reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
				      GEN8_OA_TIMER_PERIOD_SHIFT) |
				     (dev_priv->perf.oa.periodic ?
				      GEN8_OA_TIMER_ENABLE : 0) |
				     GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
		u32 state_offset = ctx_flexeu0 + i * 2;
		u32 mmio = flex_mmio[i];

		/*
		 * This arbitrary default will select the 'EU FPU0 Pipeline
		 * Active' event. In the future it's anticipated that there
		 * will be an explicit 'No Event' we can select, but not yet...
		 */
		u32 value = 0;

		if (oa_config) {
			u32 j;

			for (j = 0; j < oa_config->flex_regs_len; j++) {
				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
					value = oa_config->flex_regs[j].value;
					break;
				}
			}
		}

		reg_state[state_offset] = mmio;
		reg_state[state_offset+1] = value;
	}
}
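
/*
 * Note the layout assumed above: the register state context image holds
 * (mmio offset, value) dword pairs, which is why OACTXCONTROL is written
 * at reg_state[ctx_oactxctrl] / reg_state[ctx_oactxctrl + 1] and why each
 * Flex EU register advances state_offset by two.
 */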
/*
 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
 * is only used by the kernel context.
 */
static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
			       const struct i915_oa_config *oa_config)
{
	struct drm_i915_private *dev_priv = req->i915;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};
	u32 *cs;
	int i;
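
	/*
	 * ARRAY_SIZE(flex_mmio) * 2 + 4 dwords: one (offset, value) pair per
	 * Flex EU register, plus the MI_LOAD_REGISTER_IMM header, the
	 * OACTXCONTROL offset/value pair and a trailing MI_NOOP that keeps
	 * the emitted command sequence an even number of dwords.
	 */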
	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);

	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
		u32 mmio = flex_mmio[i];

		/*
		 * This arbitrary default will select the 'EU FPU0 Pipeline
		 * Active' event. In the future it's anticipated that there
		 * will be an explicit 'No Event' we can select, but not
		 * yet...
		 */
		u32 value = 0;

		if (oa_config) {
			u32 j;

			for (j = 0; j < oa_config->flex_regs_len; j++) {
				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
					value = oa_config->flex_regs[j].value;
					break;
				}
			}
		}

		*cs++ = mmio;
		*cs++ = value;
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
						 const struct i915_oa_config *oa_config)
{
	struct intel_engine_cs *engine = dev_priv->engine[RCS];
	struct i915_gem_timeline *timeline;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = gen8_emit_oa_config(req, oa_config);
	if (ret) {
		i915_add_request(req);
		return ret;
	}

	/* Queue this switch after all other activity */
	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
		struct drm_i915_gem_request *prev;
		struct intel_timeline *tl;

		tl = &timeline->engine[engine->id];
		prev = i915_gem_active_raw(&tl->last_request,
					   &dev_priv->drm.struct_mutex);
		if (prev)
			i915_sw_fence_await_sw_fence_gfp(&req->submit,
							 &prev->submit,
							 GFP_KERNEL);
	}

	ret = i915_switch_context(req);
	i915_add_request(req);

	return ret;
}
/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 */
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
				       const struct i915_oa_config *oa_config,
				       bool interruptible)
{
	struct i915_gem_context *ctx;
	int ret;
	unsigned int wait_flags = I915_WAIT_LOCKED;

	if (interruptible) {
		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		wait_flags |= I915_WAIT_INTERRUPTIBLE;
	} else {
		mutex_lock(&dev_priv->drm.struct_mutex);
	}

	/* Switch away from any user context. */
	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
	if (ret)
		goto out;

	/*
	 * The OA register config is set up through the context image. This
	 * image might be written to by the GPU on context switch (in
	 * particular on lite-restore). This means we can't safely update a
	 * context's image, if this context is scheduled/submitted to run on
	 * the GPU.
	 *
	 * We could emit the OA register config through the batch buffer but
	 * this might leave a small interval of time where the OA unit is
	 * configured at an invalid sampling period.
	 *
	 * So far the best way to work around this issue seems to be draining
	 * the GPU of any submitted work.
	 */
	ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
	if (ret)
		goto out;

	/* Update all contexts now that we've stalled the submission. */
	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct intel_context *ce = &ctx->engine[RCS];
		u32 *regs;

		/* OA settings will be set upon first use */
		if (!ce->state)
			continue;

		regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
		if (IS_ERR(regs)) {
			ret = PTR_ERR(regs);
			goto out;
		}

		ce->state->obj->mm.dirty = true;
		regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);

		gen8_update_reg_state_unlocked(ctx, regs, oa_config);

		i915_gem_object_unpin_map(ce->state->obj);
	}

out:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}
static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
				  const struct i915_oa_config *oa_config)
{
	int ret;

	/*
	 * We disable slice/unslice clock ratio change reports on SKL since
	 * they are too noisy. The HW generates a lot of redundant reports
	 * where the ratio hasn't really changed, causing a lot of redundant
	 * work for userspace processes and increasing the chances we'll hit
	 * buffer overruns.
	 *
	 * Although we don't currently use the 'disable overrun' OABUFFER
	 * feature it's worth noting that clock ratio reports have to be
	 * disabled before considering to use that feature since the HW doesn't
	 * correctly block these reports.
	 *
	 * Currently none of the high-level metrics we have depend on knowing
	 * this ratio to normalize.
	 *
	 * Note: This register is not power context saved and restored, but
	 * that's OK considering that we disable RC6 while the OA unit is
	 * enabled.
	 *
	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
	 * be read back from automatically triggered reports, as part of the
	 * RPT_ID field.
	 */
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	    IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
		I915_WRITE(GEN8_OA_DEBUG,
			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
	}

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
	if (ret)
		return ret;

	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

	config_oa_regs(dev_priv, oa_config->b_counter_regs,
		       oa_config->b_counter_regs_len);

	return 0;
}
static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
	/* Reset all contexts' slices/subslices configurations. */
	gen8_configure_all_contexts(dev_priv, NULL, false);

	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
				      ~GT_NOA_ENABLE));
}
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory which this helps maintain.
	 */
	gen7_init_oa_buffer(dev_priv);

	if (dev_priv->perf.oa.exclusive_stream->enabled) {
		struct i915_gem_context *ctx =
			dev_priv->perf.oa.exclusive_stream->ctx;
		u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;

		bool periodic = dev_priv->perf.oa.periodic;
		u32 period_exponent = dev_priv->perf.oa.period_exponent;
		u32 report_format = dev_priv->perf.oa.oa_buffer.format;

		I915_WRITE(GEN7_OACONTROL,
			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
			   (period_exponent <<
			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
			   GEN7_OACONTROL_ENABLE);
	} else
		I915_WRITE(GEN7_OACONTROL, 0);
}
static void gen8_oa_enable(struct drm_i915_private *dev_priv)
{
	u32 report_format = dev_priv->perf.oa.oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory which this helps maintain.
	 */
	gen8_init_oa_buffer(dev_priv);

	/*
	 * Note: we don't rely on the hardware to perform single context
	 * filtering and instead filter on the cpu based on the context-id
	 * field of reports
	 */
	I915_WRITE(GEN8_OACONTROL, (report_format <<
				    GEN8_OA_REPORT_FORMAT_SHIFT) |
				   GEN8_OA_COUNTER_ENABLE);
}
/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
 */
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	dev_priv->perf.oa.ops.oa_enable(dev_priv);

	if (dev_priv->perf.oa.periodic)
		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
			      ns_to_ktime(POLL_PERIOD),
			      HRTIMER_MODE_REL_PINNED);
}
static void gen7_oa_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN7_OACONTROL, 0);
}

static void gen8_oa_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN8_OACONTROL, 0);
}
/**
 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * Stops the OA unit from periodically writing counter reports into the
 * circular OA buffer. This also stops the hrtimer that periodically checks for
 * data in the circular OA buffer, for notifying userspace.
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	dev_priv->perf.oa.ops.oa_disable(dev_priv);

	if (dev_priv->perf.oa.periodic)
		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
}
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
	.destroy = i915_oa_stream_destroy,
	.enable = i915_oa_stream_enable,
	.disable = i915_oa_stream_disable,
	.wait_unlocked = i915_oa_wait_unlocked,
	.poll_wait = i915_oa_poll_wait,
	.read = i915_oa_read,
};
/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but still we need to further validate the combined
 * properties are OK.
 *
 * If the configuration makes sense then we can allocate memory for
 * a circular OA buffer and apply the requested metric set configuration.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_oa_stream_init(struct i915_perf_stream *stream,
			       struct drm_i915_perf_open_param *param,
			       struct perf_open_properties *props)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int format_size;
	int ret;

	/* If the sysfs metrics/ directory wasn't registered for some
	 * reason then don't let userspace try their luck with config
	 * IDs
	 */
	if (!dev_priv->perf.metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
		DRM_DEBUG("Only OA report sampling supported\n");
		return -EINVAL;
	}

	if (!dev_priv->perf.oa.ops.init_oa_buffer) {
		DRM_DEBUG("OA unit not supported\n");
		return -ENODEV;
	}

	/* To avoid the complexity of having to accurately filter
	 * counter reports and marshal to the appropriate client
	 * we currently only allow exclusive access
	 */
	if (dev_priv->perf.oa.exclusive_stream) {
		DRM_DEBUG("OA unit already in use\n");
		return -EBUSY;
	}

	if (!props->oa_format) {
		DRM_DEBUG("OA report format not specified\n");
		return -EINVAL;
	}

	/* We set up some ratelimit state to potentially throttle any _NOTES
	 * about spurious, invalid OA reports which we don't forward to
	 * userspace.
	 *
	 * The initialization is associated with opening the stream (not driver
	 * init) considering we print a _NOTE about any throttling when closing
	 * the stream instead of waiting until driver _fini which no one would
	 * ever see.
	 *
	 * Using the same limiting factors as printk_ratelimit()
	 */
	ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
			     5 * HZ, 10);
	/* Since we use a DRM_NOTE for spurious reports it would be
	 * inconsistent to let __ratelimit() automatically print a warning for
	 * throttling.
	 */
	ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
			    RATELIMIT_MSG_ON_RELEASE);

	stream->sample_size = sizeof(struct drm_i915_perf_record_header);

	format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;

	stream->sample_flags |= SAMPLE_OA_REPORT;
	stream->sample_size += format_size;

	dev_priv->perf.oa.oa_buffer.format_size = format_size;
	if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
		return -EINVAL;

	dev_priv->perf.oa.oa_buffer.format =
		dev_priv->perf.oa.oa_formats[props->oa_format].format;

	dev_priv->perf.oa.periodic = props->oa_periodic;
	if (dev_priv->perf.oa.periodic)
		dev_priv->perf.oa.period_exponent = props->oa_period_exponent;

	if (stream->ctx) {
		ret = oa_get_render_ctx_id(stream);
		if (ret)
			return ret;
	}

	ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
	if (ret)
		goto err_config;

	/* PRM - observability performance counters:
	 *
	 *   OACONTROL, performance counter enable, note:
	 *
	 *   "When this bit is set, in order to have coherent counts,
	 *   RC6 power state and trunk clock gating must be disabled.
	 *   This can be achieved by programming MMIO registers as
	 *   0xA094=0 and 0xA090[31]=1"
	 *
	 * In our case we are expecting that taking pm + FORCEWAKE
	 * references will effectively disable RC6.
	 */
	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = alloc_oa_buffer(dev_priv);
	if (ret)
		goto err_oa_buf_alloc;

	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						      stream->oa_config);
	if (ret)
		goto err_enable;

	stream->ops = &i915_oa_stream_ops;

	/* Lock device for exclusive_stream access late because
	 * enable_metric_set() might lock as well on gen8+.
	 */
	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		goto err_lock;

	dev_priv->perf.oa.exclusive_stream = stream;

	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

err_lock:
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);

err_enable:
	free_oa_buffer(dev_priv);

err_oa_buf_alloc:
	put_oa_config(dev_priv, stream->oa_config);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

err_config:
	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	return ret;
}
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
			    struct i915_gem_context *ctx,
			    u32 *reg_state)
{
	struct i915_perf_stream *stream;

	if (engine->id != RCS)
		return;

	stream = engine->i915->perf.oa.exclusive_stream;
	if (stream)
		gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
}
/**
 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
 * ensure that if we've successfully copied any data then reporting that takes
 * precedence over any internal error status, so the data isn't lost.
 *
 * For example ret will be -ENOSPC whenever there is more buffered data than
 * can be copied to userspace, but that's only interesting if we weren't able
 * to copy some data because it implies the userspace buffer is too small to
 * receive a single record (and we never split records).
 *
 * Another case with ret == -EFAULT is more of a grey area since it would seem
 * like bad form for userspace to ask us to overrun its buffer, but the user
 * knows best:
 *
 *   http://yarchive.net/comp/linux/partial_reads_writes.html
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
				     struct file *file,
				     char __user *buf,
				     size_t count,
				     loff_t *ppos)
{
	/* Note we keep the offset (aka bytes read) separate from any
	 * error status so that the final check for whether we return
	 * the bytes read with a higher precedence than any error (see
	 * comment below) doesn't need to be handled/duplicated in
	 * stream->ops->read() implementations.
	 */
	size_t offset = 0;
	int ret = stream->ops->read(stream, buf, count, &offset);
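
	/* Bytes successfully copied take precedence over any error: a bare
	 * error code is returned only when nothing was copied, and "no
	 * error but also no data" maps to -EAGAIN.
	 */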
	return offset ?: (ret ?: -EAGAIN);
}
/**
 * i915_perf_read - handles read() FOP for i915 perf stream FDs
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * The entry point for handling a read() on a stream file descriptor from
 * userspace. Most of the work is left to i915_perf_read_locked() and
 * &i915_perf_stream_ops->read but, to save stream implementations (of which
 * we might have multiple later) from each dealing with blocking reads, we
 * handle them here.
 *
 * We can also consistently treat trying to read from a disabled stream
 * as an IO error so implementations can assume the stream is enabled
 * while reading.
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *ppos)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	ssize_t ret;

	/* To ensure it's handled consistently we simply treat all reads of a
	 * disabled stream as an error. In particular it might otherwise lead
	 * to a deadlock for blocking file descriptors...
	 */
	if (!stream->enabled)
		return -EIO;

	if (!(file->f_flags & O_NONBLOCK)) {
		/* There's the small chance of false positives from
		 * stream->ops->wait_unlocked.
		 *
		 * E.g. with single context filtering since we only wait until
		 * oabuffer has >= 1 report we don't immediately know whether
		 * any reports really belong to the current context
		 */
		do {
			ret = stream->ops->wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&dev_priv->perf.lock);
			ret = i915_perf_read_locked(stream, file,
						    buf, count, ppos);
			mutex_unlock(&dev_priv->perf.lock);
		} while (ret == -EAGAIN);
	} else {
		mutex_lock(&dev_priv->perf.lock);
		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
		mutex_unlock(&dev_priv->perf.lock);
	}

	/* We allow the poll checking to sometimes report false positive POLLIN
	 * events where we might actually report EAGAIN on read() if there's
	 * not really any data available. In this situation though we don't
	 * want to enter a busy loop between poll() reporting a POLLIN event
	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
	 * effectively ensures we back off until the next hrtimer callback
	 * before reporting another POLLIN event.
	 */
	if (ret >= 0 || ret == -EAGAIN) {
		/* Maybe make ->pollin per-stream state if we support multiple
		 * concurrent streams in the future.
		 */
		dev_priv->perf.oa.pollin = false;
	}

	return ret;
}
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
	struct drm_i915_private *dev_priv =
		container_of(hrtimer, typeof(*dev_priv),
			     perf.oa.poll_check_timer);

	if (oa_buffer_check_unlocked(dev_priv)) {
		dev_priv->perf.oa.pollin = true;
		wake_up(&dev_priv->perf.oa.poll_wq);
	}

	hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));

	return HRTIMER_RESTART;
}
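
/*
 * Note: since the callback above re-arms itself (hrtimer_forward_now() +
 * HRTIMER_RESTART), once started by i915_oa_stream_enable() it keeps
 * checking the OA buffer state every POLL_PERIOD until
 * i915_oa_stream_disable() cancels it.
 */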
/**
 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
 * @dev_priv: i915 device instance
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this calls through to
 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
 * will be woken for new stream data.
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: any poll events that are ready without sleeping
 */
static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
					  struct i915_perf_stream *stream,
					  struct file *file,
					  poll_table *wait)
{
	unsigned int events = 0;

	stream->ops->poll_wait(stream, file, wait);

	/* Note: we don't explicitly check whether there's something to read
	 * here since this path may be very hot depending on what else
	 * userspace is polling, or on the timeout in use. We rely solely on
	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
	 * samples to read.
	 */
	if (dev_priv->perf.oa.pollin)
		events |= POLLIN;

	return events;
}
/**
 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this ensures
 * poll_wait() gets called with a wait queue that will be woken for new stream
 * data.
 *
 * Note: Implementation deferred to i915_perf_poll_locked()
 *
 * Returns: any poll events that are ready without sleeping
 */
static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}
/**
 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
 * @stream: A disabled i915 perf stream
 *
 * [Re]enables the associated capture of data for this stream.
 *
 * If a stream was previously enabled then there's currently no intention
 * to provide userspace any guarantee about the preservation of previously
 * buffered data.
 */
static void i915_perf_enable_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		return;

	/* Allow stream->ops->enable() to refer to this */
	stream->enabled = true;

	if (stream->ops->enable)
		stream->ops->enable(stream);
}
/**
 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
 * @stream: An enabled i915 perf stream
 *
 * Disables the associated capture of data for this stream.
 *
 * The intention is that disabling and re-enabling a stream will ideally be
 * cheaper than destroying and re-opening a stream with the same configuration,
 * though there are no formal guarantees about what state or buffered data
 * must be retained between disabling and re-enabling a stream.
 *
 * Note: while a stream is disabled it's considered an error for userspace
 * to attempt to read from the stream (-EIO).
 */
static void i915_perf_disable_locked(struct i915_perf_stream *stream)
{
	if (!stream->enabled)
		return;

	/* Allow stream->ops->disable() to refer to this */
	stream->enabled = false;

	if (stream->ops->disable)
		stream->ops->disable(stream);
}
/**
 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
 * @stream: An i915 perf stream
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
				   unsigned int cmd,
				   unsigned long arg)
{
	switch (cmd) {
	case I915_PERF_IOCTL_ENABLE:
		i915_perf_enable_locked(stream);
		return 0;
	case I915_PERF_IOCTL_DISABLE:
		i915_perf_disable_locked(stream);
		return 0;
	}

	return -EINVAL;
}
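
/*
 * For example (hypothetical userspace usage): given a stream fd returned
 * by DRM_IOCTL_I915_PERF_OPEN, capture can be toggled with:
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	...
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 */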
/**
 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
 * @file: An i915 perf stream file
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Implementation deferred to i915_perf_ioctl_locked().
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl(struct file *file,
			    unsigned int cmd,
			    unsigned long arg)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	long ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_ioctl_locked(stream, cmd, arg);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}
/**
 * i915_perf_destroy_locked - destroy an i915 perf stream
 * @stream: An i915 perf stream
 *
 * Frees all resources associated with the given i915 perf @stream, disabling
 * any associated data capture in the process.
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 */
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		i915_perf_disable_locked(stream);

	if (stream->ops->destroy)
		stream->ops->destroy(stream);

	list_del(&stream->link);

	if (stream->ctx)
		i915_gem_context_put(stream->ctx);

	kfree(stream);
}
/**
 * i915_perf_release - handles userspace close() of a stream file
 * @inode: anonymous inode associated with file
 * @file: An i915 perf stream file
 *
 * Cleans up any resources associated with an open i915 perf stream file.
 *
 * NB: close() can't really fail from the userspace point of view.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_perf_release(struct inode *inode, struct file *file)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;

	mutex_lock(&dev_priv->perf.lock);
	i915_perf_destroy_locked(stream);
	mutex_unlock(&dev_priv->perf.lock);

	return 0;
}
static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.release	= i915_perf_release,
	.poll		= i915_perf_poll,
	.read		= i915_perf_read,
	.unlocked_ioctl	= i915_perf_ioctl,
	/* Our ioctls have no arguments, so it's safe to use the same function
	 * to handle 32-bit compatibility.
	 */
	.compat_ioctl	= i915_perf_ioctl,
};
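
/*
 * A minimal sketch of the expected userspace flow against these fops
 * (illustrative only; error handling omitted and the property values are
 * placeholders):
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4,
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 *	while (read(stream_fd, buf, sizeof(buf)) > 0)
 *		; (then parse drm_i915_perf_record_header records from buf)
 */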
/**
 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
 * @dev_priv: i915 device instance
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: individually validated u64 property value pairs
 * @file: drm file
 *
 * See i915_perf_open_ioctl() for interface details.
 *
 * Implements further stream config validation and stream initialization on
 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
 * taken to serialize with any non-file-operation driver hooks.
 *
 * Note: at this point the @props have only been validated in isolation and
 * it's still necessary to validate that the combination of properties makes
 * sense.
 *
 * In the case where userspace is interested in OA unit metrics then further
 * config validation and stream initialization details will be handled by
 * i915_oa_stream_init(). The code here should only validate config state that
 * will be relevant to all stream types / backends.
 *
 * Returns: zero on success or a negative error code.
 */
static int
i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
			    struct drm_i915_perf_open_param *param,
			    struct perf_open_properties *props,
			    struct drm_file *file)
{
	struct i915_gem_context *specific_ctx = NULL;
	struct i915_perf_stream *stream = NULL;
	unsigned long f_flags = 0;
	bool privileged_op = true;
	int stream_fd;
	int ret;

	if (props->single_context) {
		u32 ctx_handle = props->ctx_handle;
		struct drm_i915_file_private *file_priv = file->driver_priv;

		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
		if (!specific_ctx) {
			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
				  ctx_handle);
			ret = -ENOENT;
			goto err;
		}
	}

	/*
	 * On Haswell the OA unit supports clock gating off for a specific
	 * context and in this mode there's no visibility of metrics for the
	 * rest of the system, which we consider acceptable for a
	 * non-privileged client.
	 *
	 * For Gen8+ the OA unit no longer supports clock gating off for a
	 * specific context and the kernel can't securely stop the counters
	 * from updating as system-wide / global values. Even though we can
	 * filter reports based on the included context ID we can't block
	 * clients from seeing the raw / global counter values via
	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
	 * enable the OA unit by default.
	 */
	if (IS_HASWELL(dev_priv) && specific_ctx)
		privileged_op = false;

	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
	 * we check a dev.i915.perf_stream_paranoid sysctl option
	 * to determine if it's ok to access system wide OA counters
	 * without CAP_SYS_ADMIN privileges.
	 */
	if (privileged_op &&
	    i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
		ret = -EACCES;
		goto err_ctx;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto err_ctx;
	}

	stream->dev_priv = dev_priv;
	stream->ctx = specific_ctx;

	ret = i915_oa_stream_init(stream, param, props);
	if (ret)
		goto err_alloc;

	/* we avoid simply assigning stream->sample_flags = props->sample_flags
	 * to have _stream_init check the combination of sample flags more
	 * thoroughly, but still this is the expected result at this point.
	 */
	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
		ret = -ENODEV;
		goto err_flags;
	}

	list_add(&stream->link, &dev_priv->perf.streams);

	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
		f_flags |= O_NONBLOCK;

	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_open;
	}

	if (!(param->flags & I915_PERF_FLAG_DISABLED))
		i915_perf_enable_locked(stream);

	return stream_fd;

err_open:
	list_del(&stream->link);
err_flags:
	if (stream->ops->destroy)
		stream->ops->destroy(stream);
err_alloc:
	kfree(stream);
err_ctx:
	if (specific_ctx)
		i915_gem_context_put(specific_ctx);
err:
	return ret;
}
  2326. static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
  2327. {
  2328. return div_u64(1000000000ULL * (2ULL << exponent),
  2329. dev_priv->perf.oa.timestamp_frequency);
  2330. }
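
/*
 * Worked example (illustrative, assuming the 12.5MHz timestamp frequency
 * used for Haswell below): one timestamp tick lasts 1000000000 / 12500000 =
 * 80ns, and the OA unit samples every 2^(exponent + 1) ticks, so:
 *
 *      oa_exponent_to_ns(dev_priv, 0)  ==      160    (160ns, the minimum)
 *      oa_exponent_to_ns(dev_priv, 16) == 10485760    (~10.5ms)
 */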
/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @dev_priv: i915 device instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties
 * so we shouldn't validate or assume anything about ordering here. This
 * doesn't rule out defining new properties with ordering requirements in the
 * future.
 *
 * Returns: zero on success or a negative error code.
 */
static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                                    u64 __user *uprops,
                                    u32 n_props,
                                    struct perf_open_properties *props)
{
        u64 __user *uprop = uprops;
        u32 i;

        memset(props, 0, sizeof(struct perf_open_properties));

        if (!n_props) {
                DRM_DEBUG("No i915 perf properties given\n");
                return -EINVAL;
        }

        /* Considering that ID = 0 is reserved and assuming that we don't
         * (currently) expect any configurations to ever specify duplicate
         * values for a particular property ID then the last _PROP_MAX value is
         * one greater than the maximum number of properties we expect to get
         * from userspace.
         */
        if (n_props >= DRM_I915_PERF_PROP_MAX) {
                DRM_DEBUG("More i915 perf properties specified than exist\n");
                return -EINVAL;
        }

        for (i = 0; i < n_props; i++) {
                u64 oa_period, oa_freq_hz;
                u64 id, value;
                int ret;

                ret = get_user(id, uprop);
                if (ret)
                        return ret;

                ret = get_user(value, uprop + 1);
                if (ret)
                        return ret;

                if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
                        DRM_DEBUG("Unknown i915 perf property ID\n");
                        return -EINVAL;
                }

                switch ((enum drm_i915_perf_property_id)id) {
                case DRM_I915_PERF_PROP_CTX_HANDLE:
                        props->single_context = 1;
                        props->ctx_handle = value;
                        break;
                case DRM_I915_PERF_PROP_SAMPLE_OA:
                        props->sample_flags |= SAMPLE_OA_REPORT;
                        break;
                case DRM_I915_PERF_PROP_OA_METRICS_SET:
                        if (value == 0) {
                                DRM_DEBUG("Unknown OA metric set ID\n");
                                return -EINVAL;
                        }
                        props->metrics_set = value;
                        break;
                case DRM_I915_PERF_PROP_OA_FORMAT:
                        if (value == 0 || value >= I915_OA_FORMAT_MAX) {
                                DRM_DEBUG("Out-of-range OA report format %llu\n",
                                          value);
                                return -EINVAL;
                        }
                        if (!dev_priv->perf.oa.oa_formats[value].size) {
                                DRM_DEBUG("Unsupported OA report format %llu\n",
                                          value);
                                return -EINVAL;
                        }
                        props->oa_format = value;
                        break;
                case DRM_I915_PERF_PROP_OA_EXPONENT:
                        if (value > OA_EXPONENT_MAX) {
                                DRM_DEBUG("OA timer exponent too high (> %u)\n",
                                          OA_EXPONENT_MAX);
                                return -EINVAL;
                        }

                        /* Theoretically we can program the OA unit to sample
                         * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
                         * for BXT. We don't allow such high sampling
                         * frequencies by default unless root.
                         */

                        BUILD_BUG_ON(sizeof(oa_period) != 8);
                        oa_period = oa_exponent_to_ns(dev_priv, value);

                        /* This check is primarily to ensure that oa_period <=
                         * UINT32_MAX (before passing to do_div which only
                         * accepts a u32 denominator), but we can also skip
                         * checking anything < 1Hz which implicitly can't be
                         * limited via an integer oa_max_sample_rate.
                         */
                        if (oa_period <= NSEC_PER_SEC) {
                                u64 tmp = NSEC_PER_SEC;

                                do_div(tmp, oa_period);
                                oa_freq_hz = tmp;
                        } else
                                oa_freq_hz = 0;

                        if (oa_freq_hz > i915_oa_max_sample_rate &&
                            !capable(CAP_SYS_ADMIN)) {
                                DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
                                          i915_oa_max_sample_rate);
                                return -EACCES;
                        }

                        props->oa_periodic = true;
                        props->oa_period_exponent = value;
                        break;
                case DRM_I915_PERF_PROP_MAX:
                        MISSING_CASE(id);
                        return -EINVAL;
                }

                uprop += 2;
        }

        return 0;
}
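
/*
 * Illustrative sketch (not part of the driver): the @uprops array parsed
 * above is a flat userspace list of (ID, value) u64 pairs, e.g. requesting
 * periodic OA reports; metrics_set_id is assumed to have been read from the
 * sysfs "id" file of a metric set:
 *
 *      uint64_t properties[] = {
 *              DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *              DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *              DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *              DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *      };
 */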
/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 key, value pair properties.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_sem.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Return: A newly opened i915 perf stream file descriptor or negative
 * error code on failure.
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_perf_open_param *param = data;
        struct perf_open_properties props;
        u32 known_open_flags;
        int ret;

        if (!dev_priv->perf.initialized) {
                DRM_DEBUG("i915 perf interface not available for this system\n");
                return -ENOTSUPP;
        }

        known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
                           I915_PERF_FLAG_FD_NONBLOCK |
                           I915_PERF_FLAG_DISABLED;
        if (param->flags & ~known_open_flags) {
                DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
                return -EINVAL;
        }

        ret = read_properties_unlocked(dev_priv,
                                       u64_to_user_ptr(param->properties_ptr),
                                       param->num_properties,
                                       &props);
        if (ret)
                return ret;

        mutex_lock(&dev_priv->perf.lock);
        ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
        mutex_unlock(&dev_priv->perf.lock);

        return ret;
}
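
/*
 * Illustrative sketch (not part of the driver): opening a stream from
 * userspace via libdrm, using a properties array like the one sketched
 * after read_properties_unlocked() above. num_properties counts (ID, value)
 * pairs, not u64 elements:
 *
 *      struct drm_i915_perf_open_param param = {
 *              .flags = I915_PERF_FLAG_FD_CLOEXEC |
 *                       I915_PERF_FLAG_FD_NONBLOCK,
 *              .num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *              .properties_ptr = (uintptr_t)properties,
 *      };
 *      int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */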
/**
 * i915_perf_register - exposes i915-perf to userspace
 * @dev_priv: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *dev_priv)
{
        int ret;

        if (!dev_priv->perf.initialized)
                return;

        /* Take the perf lock to be sure we're synchronized with any
         * attempted i915_perf_open_ioctl(), since we only register after
         * the driver has already been exposed to userspace.
         */
        mutex_lock(&dev_priv->perf.lock);

        dev_priv->perf.metrics_kobj =
                kobject_create_and_add("metrics",
                                       &dev_priv->drm.primary->kdev->kobj);
        if (!dev_priv->perf.metrics_kobj)
                goto exit;

        sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);

        if (IS_HASWELL(dev_priv)) {
                i915_perf_load_test_config_hsw(dev_priv);
        } else if (IS_BROADWELL(dev_priv)) {
                i915_perf_load_test_config_bdw(dev_priv);
        } else if (IS_CHERRYVIEW(dev_priv)) {
                i915_perf_load_test_config_chv(dev_priv);
        } else if (IS_SKYLAKE(dev_priv)) {
                if (IS_SKL_GT2(dev_priv))
                        i915_perf_load_test_config_sklgt2(dev_priv);
                else if (IS_SKL_GT3(dev_priv))
                        i915_perf_load_test_config_sklgt3(dev_priv);
                else if (IS_SKL_GT4(dev_priv))
                        i915_perf_load_test_config_sklgt4(dev_priv);
        } else if (IS_BROXTON(dev_priv)) {
                i915_perf_load_test_config_bxt(dev_priv);
        } else if (IS_KABYLAKE(dev_priv)) {
                if (IS_KBL_GT2(dev_priv))
                        i915_perf_load_test_config_kblgt2(dev_priv);
                else if (IS_KBL_GT3(dev_priv))
                        i915_perf_load_test_config_kblgt3(dev_priv);
        } else if (IS_GEMINILAKE(dev_priv)) {
                i915_perf_load_test_config_glk(dev_priv);
        }

        if (dev_priv->perf.oa.test_config.id == 0)
                goto sysfs_error;

        ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
                                 &dev_priv->perf.oa.test_config.sysfs_metric);
        if (ret)
                goto sysfs_error;

        atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);

        goto exit;

sysfs_error:
        kobject_put(dev_priv->perf.metrics_kobj);
        dev_priv->perf.metrics_kobj = NULL;

exit:
        mutex_unlock(&dev_priv->perf.lock);
}
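
/*
 * Illustrative note (the exact path depends on the card number): after
 * i915_perf_register() a metric set is enumerable from userspace as
 *
 *      /sys/class/drm/card0/metrics/<uuid>/id
 *
 * and the integer read from that file is what gets passed as
 * DRM_I915_PERF_PROP_OA_METRICS_SET when opening a stream.
 */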
/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @dev_priv: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->perf.metrics_kobj)
                return;

        sysfs_remove_group(dev_priv->perf.metrics_kobj,
                           &dev_priv->perf.oa.test_config.sysfs_metric);

        kobject_put(dev_priv->perf.metrics_kobj);
        dev_priv->perf.metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        static const i915_reg_t flex_eu_regs[] = {
                EU_PERF_CNTL0,
                EU_PERF_CNTL1,
                EU_PERF_CNTL2,
                EU_PERF_CNTL3,
                EU_PERF_CNTL4,
                EU_PERF_CNTL5,
                EU_PERF_CNTL6,
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
                if (flex_eu_regs[i].reg == addr)
                        return true;
        }
        return false;
}

static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) ||
               (addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) ||
               (addr >= OACEC0_0.reg && addr <= OACEC7_1.reg);
}

static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return addr == HALF_SLICE_CHICKEN2.reg ||
               (addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) ||
               (addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) ||
               (addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg);
}

static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return gen7_is_valid_mux_addr(dev_priv, addr) ||
               addr == WAIT_FOR_RC6_EXIT.reg ||
               (addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg);
}

static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return gen7_is_valid_mux_addr(dev_priv, addr) ||
               (addr >= 0x25100 && addr <= 0x2FF90) ||
               addr == 0x9ec0;
}

static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
        return gen7_is_valid_mux_addr(dev_priv, addr) ||
               (addr >= 0x182300 && addr <= 0x1823A4);
}

static uint32_t mask_reg_value(u32 reg, u32 val)
{
        /* HALF_SLICE_CHICKEN2 is programmed with the
         * WaDisableSTUnitPowerOptimization workaround. Make sure the value
         * programmed by userspace doesn't change this.
         */
        if (HALF_SLICE_CHICKEN2.reg == reg)
                val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

        /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
         * indicated by its name and a bunch of selection fields used by OA
         * configs.
         */
        if (WAIT_FOR_RC6_EXIT.reg == reg)
                val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

        return val;
}
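
/*
 * Background sketch (not driver code): these are "masked" registers where
 * the high 16 bits select which of the low 16 bits a write actually
 * updates, so stripping the _MASKED_BIT_ENABLE() pattern above is enough to
 * make a userspace write leave that bit untouched. Roughly, with write()
 * standing in for an MMIO write:
 *
 *      #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
 *
 *      write(reg, _MASKED_BIT_ENABLE(BIT(0)));  // sets bit 0
 *      write(reg, BIT(0) << 16);                // clears bit 0 (mask only)
 */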
static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
                                         bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
                                         u32 __user *regs,
                                         u32 n_regs)
{
        struct i915_oa_reg *oa_regs;
        int err;
        u32 i;

        if (!n_regs)
                return NULL;

        if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
                return ERR_PTR(-EFAULT);

        /* No is_valid function means we're not allowing any register to be programmed. */
        GEM_BUG_ON(!is_valid);
        if (!is_valid)
                return ERR_PTR(-EINVAL);

        oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
        if (!oa_regs)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < n_regs; i++) {
                u32 addr, value;

                err = get_user(addr, regs);
                if (err)
                        goto addr_err;

                if (!is_valid(dev_priv, addr)) {
                        DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
                        err = -EINVAL;
                        goto addr_err;
                }

                err = get_user(value, regs + 1);
                if (err)
                        goto addr_err;

                oa_regs[i].addr = _MMIO(addr);
                oa_regs[i].value = mask_reg_value(addr, value);

                regs += 2;
        }

        return oa_regs;

addr_err:
        kfree(oa_regs);
        return ERR_PTR(err);
}
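
/*
 * Illustrative sketch (not part of the driver): the @regs pointer parsed
 * above is a flat userspace array of (mmio address, value) u32 pairs, e.g.
 * a single boolean counter register (the value here is hypothetical):
 *
 *      uint32_t regs[] = {
 *              0x2710, 0x00100000,     // OASTARTTRIG1, hypothetical value
 *      };
 */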
static ssize_t show_dynamic_id(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct i915_oa_config *oa_config =
                container_of(attr, typeof(*oa_config), sysfs_metric_id);

        return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
                                         struct i915_oa_config *oa_config)
{
        sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
        oa_config->sysfs_metric_id.attr.name = "id";
        oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
        oa_config->sysfs_metric_id.show = show_dynamic_id;
        oa_config->sysfs_metric_id.store = NULL;

        oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
        oa_config->attrs[1] = NULL;

        oa_config->sysfs_metric.name = oa_config->uuid;
        oa_config->sysfs_metric.attrs = oa_config->attrs;

        return sysfs_create_group(dev_priv->perf.metrics_kobj,
                                  &oa_config->sysfs_metric);
}

/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open
 * ioctl or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_perf_oa_config *args = data;
        struct i915_oa_config *oa_config, *tmp;
        int err, id;

        if (!dev_priv->perf.initialized) {
                DRM_DEBUG("i915 perf interface not available for this system\n");
                return -ENOTSUPP;
        }

        if (!dev_priv->perf.metrics_kobj) {
                DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
                return -EINVAL;
        }

        if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
                DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
                return -EACCES;
        }

        if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
            (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
            (!args->flex_regs_ptr || !args->n_flex_regs)) {
                DRM_DEBUG("No OA registers given\n");
                return -EINVAL;
        }

        oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
        if (!oa_config) {
                DRM_DEBUG("Failed to allocate memory for the OA config\n");
                return -ENOMEM;
        }

        atomic_set(&oa_config->ref_count, 1);

        if (!uuid_is_valid(args->uuid)) {
                DRM_DEBUG("Invalid uuid format for OA config\n");
                err = -EINVAL;
                goto reg_err;
        }

        /* Last character in oa_config->uuid will be 0 because oa_config was
         * allocated with kzalloc.
         */
        memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

        oa_config->mux_regs_len = args->n_mux_regs;
        oa_config->mux_regs =
                alloc_oa_regs(dev_priv,
                              dev_priv->perf.oa.ops.is_valid_mux_reg,
                              u64_to_user_ptr(args->mux_regs_ptr),
                              args->n_mux_regs);

        if (IS_ERR(oa_config->mux_regs)) {
                DRM_DEBUG("Failed to create OA config for mux_regs\n");
                err = PTR_ERR(oa_config->mux_regs);
                goto reg_err;
        }

        oa_config->b_counter_regs_len = args->n_boolean_regs;
        oa_config->b_counter_regs =
                alloc_oa_regs(dev_priv,
                              dev_priv->perf.oa.ops.is_valid_b_counter_reg,
                              u64_to_user_ptr(args->boolean_regs_ptr),
                              args->n_boolean_regs);

        if (IS_ERR(oa_config->b_counter_regs)) {
                DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
                err = PTR_ERR(oa_config->b_counter_regs);
                goto reg_err;
        }

        if (INTEL_GEN(dev_priv) < 8) {
                if (args->n_flex_regs != 0) {
                        err = -EINVAL;
                        goto reg_err;
                }
        } else {
                oa_config->flex_regs_len = args->n_flex_regs;
                oa_config->flex_regs =
                        alloc_oa_regs(dev_priv,
                                      dev_priv->perf.oa.ops.is_valid_flex_reg,
                                      u64_to_user_ptr(args->flex_regs_ptr),
                                      args->n_flex_regs);

                if (IS_ERR(oa_config->flex_regs)) {
                        DRM_DEBUG("Failed to create OA config for flex_regs\n");
                        err = PTR_ERR(oa_config->flex_regs);
                        goto reg_err;
                }
        }

        err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
        if (err)
                goto reg_err;

        /* We shouldn't have too many configs, so this iteration shouldn't be
         * too costly.
         */
        idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
                if (!strcmp(tmp->uuid, oa_config->uuid)) {
                        DRM_DEBUG("OA config already exists with this uuid\n");
                        err = -EADDRINUSE;
                        goto sysfs_err;
                }
        }

        err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
        if (err) {
                DRM_DEBUG("Failed to create sysfs entry for OA config\n");
                goto sysfs_err;
        }

        /* Config id 0 is invalid, id 1 is reserved for the kernel's built-in
         * test config, so allocate dynamic ids starting from 2.
         */
        oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
                                  oa_config, 2,
                                  0, GFP_KERNEL);
        if (oa_config->id < 0) {
                DRM_DEBUG("Failed to allocate id for OA config\n");
                err = oa_config->id;
                goto sysfs_err;
        }

        mutex_unlock(&dev_priv->perf.metrics_lock);

        return oa_config->id;

sysfs_err:
        mutex_unlock(&dev_priv->perf.metrics_lock);
reg_err:
        put_oa_config(dev_priv, oa_config);
        DRM_DEBUG("Failed to add new OA config\n");
        return err;
}
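
/*
 * Illustrative sketch (not part of the driver): adding a config from
 * userspace via libdrm. The uuid string and register arrays are
 * caller-provided placeholders here; the positive return value is the
 * metric set id usable with DRM_I915_PERF_PROP_OA_METRICS_SET:
 *
 *      struct drm_i915_perf_oa_config config = { 0 };
 *
 *      memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
 *             sizeof(config.uuid));
 *      config.n_mux_regs = n_mux;
 *      config.mux_regs_ptr = (uintptr_t)mux_regs;
 *
 *      int id = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */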
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 *arg = data;
        struct i915_oa_config *oa_config;
        int ret;

        if (!dev_priv->perf.initialized) {
                DRM_DEBUG("i915 perf interface not available for this system\n");
                return -ENOTSUPP;
        }

        if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
                DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
                return -EACCES;
        }

        ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
        if (ret)
                goto lock_err;

        oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
        if (!oa_config) {
                DRM_DEBUG("Failed to remove unknown OA config\n");
                ret = -ENOENT;
                goto config_err;
        }

        GEM_BUG_ON(*arg != oa_config->id);

        sysfs_remove_group(dev_priv->perf.metrics_kobj,
                           &oa_config->sysfs_metric);

        idr_remove(&dev_priv->perf.metrics_idr, *arg);
        put_oa_config(dev_priv, oa_config);

config_err:
        mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
        return ret;
}
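
/*
 * Illustrative sketch (not part of the driver): the remove ioctl takes a
 * pointer to the u64 config id previously returned by the add ioctl:
 *
 *      uint64_t config_id = id;
 *
 *      drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id);
 */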
static struct ctl_table oa_table[] = {
        {
                .procname = "perf_stream_paranoid",
                .data = &i915_perf_stream_paranoid,
                .maxlen = sizeof(i915_perf_stream_paranoid),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
                .extra2 = &one,
        },
        {
                .procname = "oa_max_sample_rate",
                .data = &i915_oa_max_sample_rate,
                .maxlen = sizeof(i915_oa_max_sample_rate),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
                .extra2 = &oa_sample_rate_hard_limit,
        },
        {}
};

static struct ctl_table i915_root[] = {
        {
                .procname = "i915",
                .maxlen = 0,
                .mode = 0555,
                .child = oa_table,
        },
        {}
};

static struct ctl_table dev_root[] = {
        {
                .procname = "dev",
                .maxlen = 0,
                .mode = 0555,
                .child = i915_root,
        },
        {}
};
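
/*
 * The nested tables above surface as (mode 0644: writable by root,
 * readable by everyone):
 *
 *      /proc/sys/dev/i915/perf_stream_paranoid
 *      /proc/sys/dev/i915/oa_max_sample_rate
 *
 * e.g. `sysctl dev.i915.perf_stream_paranoid=0` allows unprivileged access
 * to system-wide OA metrics.
 */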
/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with the i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
        dev_priv->perf.oa.timestamp_frequency = 0;

        if (IS_HASWELL(dev_priv)) {
                dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                        gen7_is_valid_b_counter_addr;
                dev_priv->perf.oa.ops.is_valid_mux_reg =
                        hsw_is_valid_mux_addr;
                dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
                dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
                dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
                dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
                dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
                dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
                dev_priv->perf.oa.ops.read = gen7_oa_read;
                dev_priv->perf.oa.ops.oa_hw_tail_read =
                        gen7_oa_hw_tail_read;

                dev_priv->perf.oa.timestamp_frequency = 12500000;

                dev_priv->perf.oa.oa_formats = hsw_oa_formats;
        } else if (i915.enable_execlists) {
                /* Note: although we could theoretically also support the
                 * legacy ringbuffer mode on BDW (and earlier iterations of
                 * this driver, before upstreaming, did just that) it didn't
                 * seem worth the complexity to maintain now that BDW+ enable
                 * execlist mode by default.
                 */
                dev_priv->perf.oa.ops.is_valid_b_counter_reg =
                        gen7_is_valid_b_counter_addr;
                dev_priv->perf.oa.ops.is_valid_mux_reg =
                        gen8_is_valid_mux_addr;
                dev_priv->perf.oa.ops.is_valid_flex_reg =
                        gen8_is_valid_flex_addr;

                dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
                dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
                dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
                dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
                dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
                dev_priv->perf.oa.ops.read = gen8_oa_read;
                dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

                dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

                if (IS_GEN8(dev_priv)) {
                        dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
                        dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

                        dev_priv->perf.oa.timestamp_frequency = 12500000;

                        dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);

                        if (IS_CHERRYVIEW(dev_priv)) {
                                dev_priv->perf.oa.ops.is_valid_mux_reg =
                                        chv_is_valid_mux_addr;
                        }
                } else if (IS_GEN9(dev_priv)) {
                        dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
                        dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

                        dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);

                        switch (dev_priv->info.platform) {
                        case INTEL_BROXTON:
                        case INTEL_GEMINILAKE:
                                dev_priv->perf.oa.timestamp_frequency = 19200000;
                                break;
                        case INTEL_SKYLAKE:
                        case INTEL_KABYLAKE:
                                dev_priv->perf.oa.timestamp_frequency = 12000000;
                                break;
                        default:
                                /* Leave timestamp_frequency at 0 so we can
                                 * detect unsupported platforms.
                                 */
                                break;
                        }
                }
        }

        if (dev_priv->perf.oa.timestamp_frequency) {
                hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
                init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

                INIT_LIST_HEAD(&dev_priv->perf.streams);
                mutex_init(&dev_priv->perf.lock);
                spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

                oa_sample_rate_hard_limit =
                        dev_priv->perf.oa.timestamp_frequency / 2;
                dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

                mutex_init(&dev_priv->perf.metrics_lock);
                idr_init(&dev_priv->perf.metrics_idr);

                dev_priv->perf.initialized = true;
        }
}
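
/*
 * Worked note (derived from the assignments above): on a 12.5MHz timestamp
 * platform the sampling-rate hard limit comes out to 12500000 / 2 =
 * 6250000Hz, i.e. even root cannot raise dev.i915.oa_max_sample_rate above
 * 6.25MHz via the sysctl registered above.
 */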
static int destroy_config(int id, void *p, void *data)
{
        struct drm_i915_private *dev_priv = data;
        struct i915_oa_config *oa_config = p;

        put_oa_config(dev_priv, oa_config);

        return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->perf.initialized)
                return;

        idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
        idr_destroy(&dev_priv->perf.metrics_idr);

        unregister_sysctl_table(dev_priv->perf.sysctl_header);

        memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

        dev_priv->perf.initialized = false;
}