i915_perf.c
/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */
/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
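
/*
 * For illustration, a minimal userspace sketch of opening a stream. The uAPI
 * names come from include/uapi/drm/i915_drm.h; metrics set ID 1 here selects
 * the test config (see get_oa_config() below) and the format/exponent values
 * are arbitrary placeholders, with all error handling omitted:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be read() for drm_i915_perf_record_header framed
 * records (see the sketch following append_oa_sample() below).
 */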
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped
 *   OA buffer to perf's buffer, those bursts of sample writes looked to perf
 *   like we were sampling too fast and so we had to subvert its throttling
 *   checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
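
/* Illustration only: because OA_BUFFER_SIZE is a power of two, the same
 * modular arithmetic in OA_TAKEN() covers the tail wrapping back past the
 * head. E.g. with head = 0xfff0c0 and a wrapped tail = 0x000100:
 *
 *	OA_TAKEN(0x000100, 0xfff0c0) == (0x000100 - 0xfff0c0) & (SZ_16M - 1)
 *	                             == 0x1040 bytes available
 */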

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering EPOLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
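
/* A rough timeline of the aging scheme described above (illustrative, not
 * to scale):
 *
 *	hrtimer callback N:   hw_tail is read back and bounds checked; it is
 *	                      recorded as the 'aging' tail with
 *	                      aging_timestamp = now.
 *	hrtimer callback N+M: once now - aging_timestamp exceeds
 *	                      OA_TAIL_MARGIN_NSEC the aging tail is promoted
 *	                      to the 'aged' tail; read()s may consume reports
 *	                      up to it and a new aging tail can be captured.
 */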
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
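
/* For reference, the period for a given exponent follows from which timestamp
 * bit it selects: period = (2 << exponent) / timestamp frequency. Assuming
 * Haswell's 12.5MHz timestamp frequency (consistent with the 160ns/6.25MHz
 * figures quoted below), exponent 0 samples every 160ns while OA_EXPONENT_MAX
 * samples roughly every 5.7 minutes, which is also the point at which the
 * 32bit report timestamps wrap.
 */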

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK       0x3f
#define OAREPORT_REASON_SHIFT      19
#define OAREPORT_REASON_TIMER      (1<<0)
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_CLK_RATIO  (1<<5)

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A13]       = { 0, 64 },
        [I915_OA_FORMAT_A29]       = { 1, 128 },
        [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
        /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
        [I915_OA_FORMAT_B4_C8]     = { 4, 64 },
        [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
        [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
        [I915_OA_FORMAT_C4_B8]     = { 7, 64 },
};

static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A12]                = { 0, 64 },
        [I915_OA_FORMAT_A12_B8_C8]          = { 2, 128 },
        [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
        [I915_OA_FORMAT_C4_B8]              = { 7, 64 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
        u32 sample_flags;

        u64 single_context:1;
        u64 ctx_handle;

        /* OA sampling state */
        int metrics_set;
        int oa_format;
        bool oa_periodic;
        int oa_period_exponent;
};
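
/* Illustration only: given the example property list sketched in the overview
 * above, read_properties_unlocked() would build up:
 *
 *	.sample_flags = SAMPLE_OA_REPORT,
 *	.metrics_set = 1,
 *	.oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *	.oa_periodic = true,
 *	.oa_period_exponent = 16,
 *
 * leaving single_context/ctx_handle zeroed since no
 * DRM_I915_PERF_PROP_CTX_HANDLE property was given.
 */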

static void free_oa_config(struct drm_i915_private *dev_priv,
                           struct i915_oa_config *oa_config)
{
        /* The register lists may hold ERR_PTR values from a failed config
         * creation, which must not be passed to kfree().
         */
        if (!IS_ERR(oa_config->flex_regs))
                kfree(oa_config->flex_regs);
        if (!IS_ERR(oa_config->b_counter_regs))
                kfree(oa_config->b_counter_regs);
        if (!IS_ERR(oa_config->mux_regs))
                kfree(oa_config->mux_regs);
        kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
                          struct i915_oa_config *oa_config)
{
        if (!atomic_dec_and_test(&oa_config->ref_count))
                return;

        free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
                         int metrics_set,
                         struct i915_oa_config **out_config)
{
        int ret;

        if (metrics_set == 1) {
                *out_config = &dev_priv->perf.oa.test_config;
                atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
                return 0;
        }

        ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
        if (ret)
                return ret;

        *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
        if (!*out_config)
                ret = -EINVAL;
        else
                atomic_inc(&(*out_config)->ref_count);

        mutex_unlock(&dev_priv->perf.metrics_lock);

        return ret;
}

static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
        return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
        u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

        return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        unsigned long flags;
        unsigned int aged_idx;
        u32 head, hw_tail, aged_tail, aging_tail;
        u64 now;

        /* We have to consider the (unlikely) possibility that read() errors
         * could result in an OA buffer reset which might reset the head,
         * tails[] and aged_tail state.
         */
        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /* NB: The head we observe here might effectively be a little out of
         * date (between head and tails[aged_idx].offset) if there is currently
         * a read() in progress.
         */
        head = dev_priv->perf.oa.oa_buffer.head;
        aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
        aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
        aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

        hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

        /* The tail pointer increases in 64 byte increments,
         * not in report_size steps...
         */
        hw_tail &= ~(report_size - 1);

        now = ktime_get_mono_fast_ns();

        /* Update the aged tail
         *
         * Flip the tail pointer available for read()s once the aging tail is
         * old enough to trust that the corresponding data will be visible to
         * the CPU...
         *
         * Do this before updating the aging pointer in case we may be able to
         * immediately start aging a new pointer too (if new data has become
         * available) without needing to wait for a later hrtimer callback.
         */
        if (aging_tail != INVALID_TAIL_PTR &&
            ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
             OA_TAIL_MARGIN_NSEC)) {

                aged_idx ^= 1;
                dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

                aged_tail = aging_tail;

                /* Mark that we need a new pointer to start aging... */
                dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
                aging_tail = INVALID_TAIL_PTR;
        }

        /* Update the aging tail
         *
         * We throttle aging tail updates until we have a new tail that
         * represents >= one report more data than is already available for
         * reading. This ensures there will be enough data for a successful
         * read once this new pointer has aged and ensures we will give the new
         * pointer time to age.
         */
        if (aging_tail == INVALID_TAIL_PTR &&
            (aged_tail == INVALID_TAIL_PTR ||
             OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
                struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
                u32 gtt_offset = i915_ggtt_offset(vma);

                /* Be paranoid and do a bounds check on the pointer read back
                 * from hardware, just in case some spurious hardware condition
                 * could put the tail out of bounds...
                 */
                if (hw_tail >= gtt_offset &&
                    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
                        dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
                                aging_tail = hw_tail;
                        dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
                } else {
                        DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
                                  hw_tail);
                }
        }

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        return aged_tail == INVALID_TAIL_PTR ?
                false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
                            char __user *buf,
                            size_t count,
                            size_t *offset,
                            enum drm_i915_perf_record_type type)
{
        struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

        if ((count - *offset) < header.size)
                return -ENOSPC;

        if (copy_to_user(buf + *offset, &header, sizeof(header)))
                return -EFAULT;

        (*offset) += header.size;

        return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
                            char __user *buf,
                            size_t count,
                            size_t *offset,
                            const u8 *report)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        struct drm_i915_perf_record_header header;
        u32 sample_flags = stream->sample_flags;

        header.type = DRM_I915_PERF_RECORD_SAMPLE;
        header.pad = 0;
        header.size = stream->sample_size;

        if ((count - *offset) < header.size)
                return -ENOSPC;

        buf += *offset;
        if (copy_to_user(buf, &header, sizeof(header)))
                return -EFAULT;
        buf += sizeof(header);

        if (sample_flags & SAMPLE_OA_REPORT) {
                if (copy_to_user(buf, report, report_size))
                        return -EFAULT;
        }

        (*offset) += header.size;

        return 0;
}
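
/*
 * For illustration, a sketch of how userspace might consume the records
 * framed by append_oa_status()/append_oa_sample(); assumes a stream_fd
 * opened as sketched in the overview above, with error handling omitted:
 *
 *	uint8_t data[4096];
 *	ssize_t len = read(stream_fd, data, sizeof(data));
 *	size_t pos = 0;
 *
 *	while (pos < (size_t)len) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(data + pos);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			handle_oa_report(header + 1);
 *		pos += header->size;
 *	}
 *
 * where handle_oa_report() is a hypothetical callback that parses the raw OA
 * report following the header (packed in the HW format requested via
 * DRM_I915_PERF_PROP_OA_FORMAT). DRM_I915_PERF_RECORD_OA_REPORT_LOST and
 * DRM_I915_PERF_RECORD_OA_BUFFER_LOST status records carry no payload beyond
 * the header.
 */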

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *                          userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                                  char __user *buf,
                                  size_t count,
                                  size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        u32 mask = (OA_BUFFER_SIZE - 1);
        size_t start_offset = *offset;
        unsigned long flags;
        unsigned int aged_tail_idx;
        u32 head, tail;
        u32 taken;
        int ret = 0;

        if (WARN_ON(!stream->enabled))
                return -EIO;

        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        head = dev_priv->perf.oa.oa_buffer.head;
        aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
        tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /*
         * An invalid tail pointer here means we're still waiting for the poll
         * hrtimer callback to give us a pointer
         */
        if (tail == INVALID_TAIL_PTR)
                return -EAGAIN;

        /*
         * NB: oa_buffer.head/tail include the gtt_offset which we don't want
         * while indexing relative to oa_buf_base.
         */
        head -= gtt_offset;
        tail -= gtt_offset;

        /*
         * An out of bounds or misaligned head or tail pointer implies a driver
         * bug since we validate + align the tail pointers we read from the
         * hardware and we are in full control of the head pointer which should
         * only be incremented by multiples of the report size (notably also
         * all a power of two).
         */
        if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
                      tail > OA_BUFFER_SIZE || tail % report_size,
                      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
                      head, tail))
                return -EIO;

        for (/* none */;
             (taken = OA_TAKEN(tail, head));
             head = (head + report_size) & mask) {
                u8 *report = oa_buf_base + head;
                u32 *report32 = (void *)report;
                u32 ctx_id;
                u32 reason;

                /*
                 * All the report sizes factor neatly into the buffer
                 * size so we never expect to see a report split
                 * between the beginning and end of the buffer.
                 *
                 * Given the initial alignment check a misalignment
                 * here would imply a driver bug that would result
                 * in an overrun.
                 */
                if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
                        DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
                        break;
                }

                /*
                 * The reason field includes flags identifying what
                 * triggered this specific report (mostly timer
                 * triggered or e.g. due to a context switch).
                 *
                 * This field is never expected to be zero so we can
                 * check that the report isn't invalid before copying
                 * it to userspace...
                 */
                reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
                          OAREPORT_REASON_MASK);
                if (reason == 0) {
                        if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
                                DRM_NOTE("Skipping spurious, invalid OA report\n");
                        continue;
                }

                /*
                 * XXX: Just keep the lower 21 bits for now since I'm not
                 * entirely sure if the HW touches any of the higher bits in
                 * this field
                 */
                ctx_id = report32[2] & 0x1fffff;

                /*
                 * Squash whatever is in the CTX_ID field if it's marked as
                 * invalid to be sure we avoid false-positive, single-context
                 * filtering below...
                 *
                 * Note: that we don't clear the valid_ctx_bit so userspace can
                 * understand that the ID has been squashed by the kernel.
                 */
                if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
                        ctx_id = report32[2] = INVALID_CTX_ID;

                /*
                 * NB: For Gen 8 the OA unit no longer supports clock gating
                 * off for a specific context and the kernel can't securely
                 * stop the counters from updating as system-wide / global
                 * values.
                 *
                 * Automatic reports now include a context ID so reports can be
                 * filtered on the cpu but it's not worth trying to
                 * automatically subtract/hide counter progress for other
                 * contexts while filtering since we can't stop userspace
                 * issuing MI_REPORT_PERF_COUNT commands which would still
                 * provide a side-band view of the real values.
                 *
                 * For userspace (such as Mesa/GL_INTEL_performance_query) to
                 * normalize counters for a single filtered context, it needs
                 * to be forwarded bookend context-switch reports so that it
                 * can track switches in between MI_REPORT_PERF_COUNT commands
                 * and can itself subtract/ignore the progress of counters
                 * associated with other contexts. Note that the hardware
                 * automatically triggers reports when switching to a new
                 * context which are tagged with the ID of the newly active
                 * context. To avoid the complexity (and likely fragility) of
                 * reading ahead while parsing reports to try and minimize
                 * forwarding redundant context switch reports (i.e. between
                 * other, unrelated contexts) we simply elect to forward them
                 * all.
                 *
                 * We don't rely solely on the reason field to identify context
                 * switches since it's not uncommon for periodic samples to
                 * identify a switch before any 'context switch' report.
                 */
                if (!dev_priv->perf.oa.exclusive_stream->ctx ||
                    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
                    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
                     dev_priv->perf.oa.specific_ctx_id) ||
                    reason & OAREPORT_REASON_CTX_SWITCH) {

                        /*
                         * While filtering for a single context we avoid
                         * leaking the IDs of other contexts.
                         */
                        if (dev_priv->perf.oa.exclusive_stream->ctx &&
                            dev_priv->perf.oa.specific_ctx_id != ctx_id) {
                                report32[2] = INVALID_CTX_ID;
                        }

                        ret = append_oa_sample(stream, buf, count, offset,
                                               report);
                        if (ret)
                                break;

                        dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
                }

                /*
                 * The above reason field sanity check is based on
                 * the assumption that the OA buffer is initially
                 * zeroed and we reset the field after copying so the
                 * check is still meaningful once old reports start
                 * being overwritten.
                 */
                report32[0] = 0;
        }

        if (start_offset != *offset) {
                spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

                /*
                 * We removed the gtt_offset for the copy loop above, indexing
                 * relative to oa_buf_base so put back here...
                 */
                head += gtt_offset;

                I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
                dev_priv->perf.oa.oa_buffer.head = head;

                spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
        }

        return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
                        char __user *buf,
                        size_t count,
                        size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        u32 oastatus;
        int ret;

        if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
                return -EIO;

        oastatus = I915_READ(GEN8_OASTATUS);

        /*
         * We treat OABUFFER_OVERFLOW as a significant error:
         *
         * Although theoretically we could handle this more gracefully
         * sometimes, some Gens don't correctly suppress certain
         * automatically triggered reports in this condition and so we
         * have to assume that old reports are now being trampled
         * over.
         *
         * Considering how we don't currently give userspace control
         * over the OA buffer size and always configure a large 16MB
         * buffer, then a buffer overflow does anyway likely indicate
         * that something has gone quite badly wrong.
         */
        if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
                if (ret)
                        return ret;

                DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
                          dev_priv->perf.oa.period_exponent);

                dev_priv->perf.oa.ops.oa_disable(dev_priv);
                dev_priv->perf.oa.ops.oa_enable(dev_priv);

                /*
                 * Note: .oa_enable() is expected to re-init the oabuffer and
                 * reset GEN8_OASTATUS for us
                 */
                oastatus = I915_READ(GEN8_OASTATUS);
        }

        if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
                if (ret)
                        return ret;
                I915_WRITE(GEN8_OASTATUS,
                           oastatus & ~GEN8_OASTATUS_REPORT_LOST);
        }

        return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *                          userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
                                  char __user *buf,
                                  size_t count,
                                  size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        u32 mask = (OA_BUFFER_SIZE - 1);
        size_t start_offset = *offset;
        unsigned long flags;
        unsigned int aged_tail_idx;
        u32 head, tail;
        u32 taken;
        int ret = 0;

        if (WARN_ON(!stream->enabled))
                return -EIO;

        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        head = dev_priv->perf.oa.oa_buffer.head;
        aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
        tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /* An invalid tail pointer here means we're still waiting for the poll
         * hrtimer callback to give us a pointer
         */
        if (tail == INVALID_TAIL_PTR)
                return -EAGAIN;

        /* NB: oa_buffer.head/tail include the gtt_offset which we don't want
         * while indexing relative to oa_buf_base.
         */
        head -= gtt_offset;
        tail -= gtt_offset;

        /* An out of bounds or misaligned head or tail pointer implies a driver
         * bug since we validate + align the tail pointers we read from the
         * hardware and we are in full control of the head pointer which should
         * only be incremented by multiples of the report size (notably also
         * all a power of two).
         */
        if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
                      tail > OA_BUFFER_SIZE || tail % report_size,
                      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
                      head, tail))
                return -EIO;

        for (/* none */;
             (taken = OA_TAKEN(tail, head));
             head = (head + report_size) & mask) {
                u8 *report = oa_buf_base + head;
                u32 *report32 = (void *)report;

                /* All the report sizes factor neatly into the buffer
                 * size so we never expect to see a report split
                 * between the beginning and end of the buffer.
                 *
                 * Given the initial alignment check a misalignment
                 * here would imply a driver bug that would result
                 * in an overrun.
                 */
                if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
                        DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
                        break;
                }

                /* The report-ID field for periodic samples includes
                 * some undocumented flags related to what triggered
                 * the report and is never expected to be zero so we
                 * can check that the report isn't invalid before
                 * copying it to userspace...
                 */
                if (report32[0] == 0) {
                        if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
                                DRM_NOTE("Skipping spurious, invalid OA report\n");
                        continue;
                }

                ret = append_oa_sample(stream, buf, count, offset, report);
                if (ret)
                        break;

                /* The above report-id field sanity check is based on
                 * the assumption that the OA buffer is initially
                 * zeroed and we reset the field after copying so the
                 * check is still meaningful once old reports start
                 * being overwritten.
                 */
                report32[0] = 0;
        }

        if (start_offset != *offset) {
                spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

                /* We removed the gtt_offset for the copy loop above, indexing
                 * relative to oa_buf_base so put back here...
                 */
                head += gtt_offset;

                I915_WRITE(GEN7_OASTATUS2,
                           ((head & GEN7_OASTATUS2_HEAD_MASK) |
                            OA_MEM_SELECT_GGTT));
                dev_priv->perf.oa.oa_buffer.head = head;

                spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
        }

        return ret;
}
/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.oa.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}
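
/*
 * Editor's sketch (hypothetical userspace code, not part of this file): the
 * records appended above reach userspace as a stream of
 * struct drm_i915_perf_record_header followed by a payload, so a reader
 * would typically walk a read() buffer along these lines:
 *
 *	for (size_t pos = 0; pos < n_read; pos += hdr->size) {
 *		hdr = (const struct drm_i915_perf_record_header *)(buf + pos);
 *		switch (hdr->type) {
 *		case DRM_I915_PERF_RECORD_SAMPLE:		// OA report
 *		case DRM_I915_PERF_RECORD_OA_REPORT_LOST:	// gap in data
 *		case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:	// full overflow
 *			...
 *		}
 *	}
 */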
/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!dev_priv->perf.oa.periodic)
		return -EIO;

	return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
					oa_buffer_check_unlocked(dev_priv));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];
		struct intel_ring *ring;
		int ret;

		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 *
		 * NB: implied RCS engine...
		 */
		ring = engine->context_pin(engine, stream->ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (IS_ERR(ring))
			return PTR_ERR(ring);

		/*
		 * Explicitly track the ID (instead of calling
		 * i915_ggtt_offset() on the fly) considering the difference
		 * with gen8+ and execlists
		 */
		dev_priv->perf.oa.specific_ctx_id =
			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
	}

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];

		mutex_lock(&dev_priv->drm.struct_mutex);

		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
		engine->context_unpin(engine, stream->ctx);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}
}
static void
free_oa_buffer(struct drm_i915_private *i915)
{
	mutex_lock(&i915->drm.struct_mutex);

	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

	i915->perf.oa.oa_buffer.vma = NULL;
	i915->perf.oa.oa_buffer.vaddr = NULL;

	mutex_unlock(&i915->drm.struct_mutex);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	dev_priv->perf.oa.exclusive_stream = NULL;
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	free_oa_buffer(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	put_oa_config(dev_priv, stream->oa_config);

	if (dev_priv->perf.oa.spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 dev_priv->perf.oa.spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN7_OABUFFER, gtt_offset);

	I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/* Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}
static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	I915_WRITE(GEN8_OASTATUS, 0);
	I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	I915_WRITE(GEN8_OABUFFER, gtt_offset |
		   OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
	I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/*
	 * Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}
static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		return ret;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		DRM_ERROR("Failed to allocate OA buffer\n");
		ret = PTR_ERR(bo);
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}
	dev_priv->perf.oa.oa_buffer.vma = vma;

	dev_priv->perf.oa.oa_buffer.vaddr =
		i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
		ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
		goto err_unpin;
	}

	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);

	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
			 dev_priv->perf.oa.oa_buffer.vaddr);

	goto unlock;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	dev_priv->perf.oa.oa_buffer.vaddr = NULL;
	dev_priv->perf.oa.oa_buffer.vma = NULL;

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}
static void config_oa_regs(struct drm_i915_private *dev_priv,
			   const struct i915_oa_reg *regs,
			   u32 n_regs)
{
	u32 i;

	for (i = 0; i < n_regs; i++) {
		const struct i915_oa_reg *reg = regs + i;

		I915_WRITE(reg->addr, reg->value);
	}
}

static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
				 const struct i915_oa_config *oa_config)
{
	/* PRM:
	 *
	 * OA unit is using “crclk” for its functionality. When trunk
	 * level clock gating takes place, OA clock would be gated,
	 * unable to count the events from non-render clock domain.
	 * Render clock gating must be disabled when OA is enabled to
	 * count the events from non-render domain. Unit level clock
	 * gating for RCS should also be disabled.
	 */
	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
				    ~GEN7_DOP_CLOCK_GATE_ENABLE));
	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));

	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

	/* It apparently takes a fairly long time for a new MUX
	 * configuration to be applied after these register writes.
	 * This delay duration was derived empirically based on the
	 * render_basic config but hopefully it covers the maximum
	 * configuration latency.
	 *
	 * As a fallback, the checks in _append_oa_reports() to skip
	 * invalid OA reports do also seem to work to discard reports
	 * generated before this config has completed - albeit not
	 * silently.
	 *
	 * Unfortunately this is essentially a magic number, since we
	 * don't currently know of a reliable mechanism for predicting
	 * how long the MUX config will take to apply and besides
	 * seeing invalid reports we don't know of a reliable way to
	 * explicitly check that the MUX config has landed.
	 *
	 * It's even possible we've mischaracterized the underlying
	 * problem - it just seems like the simplest explanation why
	 * a delay at this location would mitigate any invalid reports.
	 */
	usleep_range(15000, 20000);

	config_oa_regs(dev_priv, oa_config->b_counter_regs,
		       oa_config->b_counter_regs_len);

	return 0;
}
static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
				  ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
				    GEN7_DOP_CLOCK_GATE_ENABLE));

	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
				      ~GT_NOA_ENABLE));
}

/*
 * NB: It must always remain pointer safe to run this even if the OA unit
 * has been disabled.
 *
 * It's fine to put out-of-date values into these per-context registers
 * in the case that the OA unit has been disabled.
 */
static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
					   u32 *reg_state,
					   const struct i915_oa_config *oa_config)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};
	int i;

	reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
	reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
				      GEN8_OA_TIMER_PERIOD_SHIFT) |
				     (dev_priv->perf.oa.periodic ?
				      GEN8_OA_TIMER_ENABLE : 0) |
				     GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
		u32 state_offset = ctx_flexeu0 + i * 2;
		u32 mmio = flex_mmio[i];

		/*
		 * This arbitrary default will select the 'EU FPU0 Pipeline
		 * Active' event. In the future it's anticipated that there
		 * will be an explicit 'No Event' we can select, but not yet...
		 */
		u32 value = 0;

		if (oa_config) {
			u32 j;

			for (j = 0; j < oa_config->flex_regs_len; j++) {
				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
					value = oa_config->flex_regs[j].value;
					break;
				}
			}
		}

		reg_state[state_offset] = mmio;
		reg_state[state_offset+1] = value;
	}
}
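
/*
 * Editor's illustration (not driver code): the register state context holds
 * (mmio offset, value) dword pairs, so for the flex EU registers the loop
 * above effectively writes:
 *
 *	reg_state[ctx_flexeu0 + 0] = <EU_PERF_CNTL0 offset>
 *	reg_state[ctx_flexeu0 + 1] = <EU_PERF_CNTL0 value>
 *	reg_state[ctx_flexeu0 + 2] = <EU_PERF_CNTL1 offset>
 *	reg_state[ctx_flexeu0 + 3] = <EU_PERF_CNTL1 value>
 *	...
 *
 * with each value defaulting to 0 unless the oa_config overrides it.
 */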
/*
 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
 * is only used by the kernel context.
 */
static int gen8_emit_oa_config(struct i915_request *rq,
			       const struct i915_oa_config *oa_config)
{
	struct drm_i915_private *dev_priv = rq->i915;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};
	u32 *cs;
	int i;

	cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);

	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
		u32 mmio = flex_mmio[i];

		/*
		 * This arbitrary default will select the 'EU FPU0 Pipeline
		 * Active' event. In the future it's anticipated that there
		 * will be an explicit 'No Event' we can select, but not
		 * yet...
		 */
		u32 value = 0;

		if (oa_config) {
			u32 j;

			for (j = 0; j < oa_config->flex_regs_len; j++) {
				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
					value = oa_config->flex_regs[j].value;
					break;
				}
			}
		}

		*cs++ = mmio;
		*cs++ = value;
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
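
/*
 * Editor's illustration (not driver code): with seven flex EU registers the
 * emission above produces a command packet shaped like:
 *
 *	MI_LOAD_REGISTER_IMM(8)
 *	<GEN8_OACTXCONTROL offset> <timer/periodic config>
 *	<EU_PERF_CNTL0 offset> <value>
 *	...
 *	<EU_PERF_CNTL6 offset> <value>
 *	MI_NOOP
 *
 * i.e. ARRAY_SIZE(flex_mmio) * 2 + 4 dwords in total, matching the
 * intel_ring_begin() reservation above.
 */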
static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
						 const struct i915_oa_config *oa_config)
{
	struct intel_engine_cs *engine = dev_priv->engine[RCS];
	struct i915_gem_timeline *timeline;
	struct i915_request *rq;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_retire_requests(dev_priv);

	rq = i915_request_alloc(engine, dev_priv->kernel_context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = gen8_emit_oa_config(rq, oa_config);
	if (ret) {
		i915_request_add(rq);
		return ret;
	}

	/* Queue this switch after all other activity */
	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
		struct i915_request *prev;
		struct intel_timeline *tl;

		tl = &timeline->engine[engine->id];
		prev = i915_gem_active_raw(&tl->last_request,
					   &dev_priv->drm.struct_mutex);
		if (prev)
			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							 &prev->submit,
							 GFP_KERNEL);
	}

	i915_request_add(rq);

	return 0;
}

/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 */
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
				       const struct i915_oa_config *oa_config)
{
	struct i915_gem_context *ctx;
	int ret;
	unsigned int wait_flags = I915_WAIT_LOCKED;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Switch away from any user context. */
	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
	if (ret)
		goto out;

	/*
	 * The OA register config is setup through the context image. This image
	 * might be written to by the GPU on context switch (in particular on
	 * lite-restore). This means we can't safely update a context's image,
	 * if this context is scheduled/submitted to run on the GPU.
	 *
	 * We could emit the OA register config through the batch buffer but
	 * this might leave a small interval of time where the OA unit is
	 * configured at an invalid sampling period.
	 *
	 * So far the best way to work around this issue seems to be draining
	 * the GPU from any submitted work.
	 */
	ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
	if (ret)
		goto out;

	/* Update all contexts now that we've stalled the submission. */
	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct intel_context *ce = &ctx->engine[RCS];
		u32 *regs;

		/* OA settings will be set upon first use */
		if (!ce->state)
			continue;

		regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
		if (IS_ERR(regs)) {
			ret = PTR_ERR(regs);
			goto out;
		}

		ce->state->obj->mm.dirty = true;
		regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);

		gen8_update_reg_state_unlocked(ctx, regs, oa_config);

		i915_gem_object_unpin_map(ce->state->obj);
	}

out:
	return ret;
}
static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
				  const struct i915_oa_config *oa_config)
{
	int ret;

	/*
	 * We disable slice/unslice clock ratio change reports on SKL since
	 * they are too noisy. The HW generates a lot of redundant reports
	 * where the ratio hasn't really changed causing a lot of redundant
	 * work to processes and increasing the chances we'll hit buffer
	 * overruns.
	 *
	 * Although we don't currently use the 'disable overrun' OABUFFER
	 * feature it's worth noting that clock ratio reports have to be
	 * disabled before considering use of that feature since the HW doesn't
	 * correctly block these reports.
	 *
	 * Currently none of the high-level metrics we have depend on knowing
	 * this ratio to normalize.
	 *
	 * Note: This register is not power context saved and restored, but
	 * that's OK considering that we disable RC6 while the OA unit is
	 * enabled.
	 *
	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
	 * be read back from automatically triggered reports, as part of the
	 * RPT_ID field.
	 */
	if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
		I915_WRITE(GEN8_OA_DEBUG,
			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
	}

	/*
	 * Update all contexts prior writing the mux configurations as we need
	 * to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = gen8_configure_all_contexts(dev_priv, oa_config);
	if (ret)
		return ret;

	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

	config_oa_regs(dev_priv, oa_config->b_counter_regs,
		       oa_config->b_counter_regs_len);

	return 0;
}

static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
	/* Reset all contexts' slices/subslices configurations. */
	gen8_configure_all_contexts(dev_priv, NULL);

	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
				      ~GT_NOA_ENABLE));
}

static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
{
	/* Reset all contexts' slices/subslices configurations. */
	gen8_configure_all_contexts(dev_priv, NULL);

	/* Make sure we disable noa to save power. */
	I915_WRITE(RPM_CONFIG1,
		   I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
}
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory which this helps maintain.
	 */
	gen7_init_oa_buffer(dev_priv);

	if (dev_priv->perf.oa.exclusive_stream->enabled) {
		struct i915_gem_context *ctx =
			dev_priv->perf.oa.exclusive_stream->ctx;
		u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;

		bool periodic = dev_priv->perf.oa.periodic;
		u32 period_exponent = dev_priv->perf.oa.period_exponent;
		u32 report_format = dev_priv->perf.oa.oa_buffer.format;

		I915_WRITE(GEN7_OACONTROL,
			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
			   (period_exponent <<
			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
			   GEN7_OACONTROL_ENABLE);
	} else
		I915_WRITE(GEN7_OACONTROL, 0);
}

static void gen8_oa_enable(struct drm_i915_private *dev_priv)
{
	u32 report_format = dev_priv->perf.oa.oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory which this helps maintain.
	 */
	gen8_init_oa_buffer(dev_priv);

	/*
	 * Note: we don't rely on the hardware to perform single context
	 * filtering and instead filter on the cpu based on the context-id
	 * field of reports
	 */
	I915_WRITE(GEN8_OACONTROL, (report_format <<
				    GEN8_OA_REPORT_FORMAT_SHIFT) |
				   GEN8_OA_COUNTER_ENABLE);
}
/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
 */
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	dev_priv->perf.oa.ops.oa_enable(dev_priv);

	if (dev_priv->perf.oa.periodic)
		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
			      ns_to_ktime(POLL_PERIOD),
			      HRTIMER_MODE_REL_PINNED);
}

static void gen7_oa_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN7_OACONTROL, 0);
}

static void gen8_oa_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN8_OACONTROL, 0);
}

/**
 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * Stops the OA unit from periodically writing counter reports into the
 * circular OA buffer. This also stops the hrtimer that periodically checks for
 * data in the circular OA buffer, for notifying userspace.
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	dev_priv->perf.oa.ops.oa_disable(dev_priv);

	if (dev_priv->perf.oa.periodic)
		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
}

static const struct i915_perf_stream_ops i915_oa_stream_ops = {
	.destroy = i915_oa_stream_destroy,
	.enable = i915_oa_stream_enable,
	.disable = i915_oa_stream_disable,
	.wait_unlocked = i915_oa_wait_unlocked,
	.poll_wait = i915_oa_poll_wait,
	.read = i915_oa_read,
};
/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but still we need to further validate the combined
 * properties are OK.
 *
 * If the configuration makes sense then we can allocate memory for
 * a circular OA buffer and apply the requested metric set configuration.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_oa_stream_init(struct i915_perf_stream *stream,
			       struct drm_i915_perf_open_param *param,
			       struct perf_open_properties *props)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int format_size;
	int ret;

	/* If the sysfs metrics/ directory wasn't registered for some
	 * reason then don't let userspace try their luck with config
	 * IDs
	 */
	if (!dev_priv->perf.metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
		DRM_DEBUG("Only OA report sampling supported\n");
		return -EINVAL;
	}

	if (!dev_priv->perf.oa.ops.init_oa_buffer) {
		DRM_DEBUG("OA unit not supported\n");
		return -ENODEV;
	}

	/* To avoid the complexity of having to accurately filter
	 * counter reports and marshal to the appropriate client
	 * we currently only allow exclusive access
	 */
	if (dev_priv->perf.oa.exclusive_stream) {
		DRM_DEBUG("OA unit already in use\n");
		return -EBUSY;
	}

	if (!props->oa_format) {
		DRM_DEBUG("OA report format not specified\n");
		return -EINVAL;
	}

	/* We set up some ratelimit state to potentially throttle any _NOTES
	 * about spurious, invalid OA reports which we don't forward to
	 * userspace.
	 *
	 * The initialization is associated with opening the stream (not driver
	 * init) considering we print a _NOTE about any throttling when closing
	 * the stream instead of waiting until driver _fini which no one would
	 * ever see.
	 *
	 * Using the same limiting factors as printk_ratelimit()
	 */
	ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
			     5 * HZ, 10);
	/* Since we use a DRM_NOTE for spurious reports it would be
	 * inconsistent to let __ratelimit() automatically print a warning for
	 * throttling.
	 */
	ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
			    RATELIMIT_MSG_ON_RELEASE);

	stream->sample_size = sizeof(struct drm_i915_perf_record_header);

	format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;

	stream->sample_flags |= SAMPLE_OA_REPORT;
	stream->sample_size += format_size;

	dev_priv->perf.oa.oa_buffer.format_size = format_size;
	if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
		return -EINVAL;

	dev_priv->perf.oa.oa_buffer.format =
		dev_priv->perf.oa.oa_formats[props->oa_format].format;

	dev_priv->perf.oa.periodic = props->oa_periodic;
	if (dev_priv->perf.oa.periodic)
		dev_priv->perf.oa.period_exponent = props->oa_period_exponent;

	if (stream->ctx) {
		ret = oa_get_render_ctx_id(stream);
		if (ret)
			return ret;
	}

	ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
	if (ret)
		goto err_config;

	/* PRM - observability performance counters:
	 *
	 *   OACONTROL, performance counter enable, note:
	 *
	 *   "When this bit is set, in order to have coherent counts,
	 *   RC6 power state and trunk clock gating must be disabled.
	 *   This can be achieved by programming MMIO registers as
	 *   0xA094=0 and 0xA090[31]=1"
	 *
	 *   In our case we are expecting that taking pm + FORCEWAKE
	 *   references will effectively disable RC6.
	 */
	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = alloc_oa_buffer(dev_priv);
	if (ret)
		goto err_oa_buf_alloc;

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		goto err_lock;

	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						      stream->oa_config);
	if (ret)
		goto err_enable;

	stream->ops = &i915_oa_stream_ops;

	dev_priv->perf.oa.exclusive_stream = stream;

	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

err_enable:
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_lock:
	free_oa_buffer(dev_priv);

err_oa_buf_alloc:
	put_oa_config(dev_priv, stream->oa_config);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

err_config:
	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	return ret;
}
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
			    struct i915_gem_context *ctx,
			    u32 *reg_state)
{
	struct i915_perf_stream *stream;

	if (engine->id != RCS)
		return;

	stream = engine->i915->perf.oa.exclusive_stream;
	if (stream)
		gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
}

/**
 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
 * ensure that if we've successfully copied any data then reporting that takes
 * precedence over any internal error status, so the data isn't lost.
 *
 * For example ret will be -ENOSPC whenever there is more buffered data than
 * can be copied to userspace, but that's only interesting if we weren't able
 * to copy some data because it implies the userspace buffer is too small to
 * receive a single record (and we never split records).
 *
 * Another case with ret == -EFAULT is more of a grey area since it would seem
 * like bad form for userspace to ask us to overrun its buffer, but the user
 * knows best:
 *
 *   http://yarchive.net/comp/linux/partial_reads_writes.html
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
				     struct file *file,
				     char __user *buf,
				     size_t count,
				     loff_t *ppos)
{
	/* Note we keep the offset (aka bytes read) separate from any
	 * error status so that the final check for whether we return
	 * the bytes read with a higher precedence than any error (see
	 * comment below) doesn't need to be handled/duplicated in
	 * stream->ops->read() implementations.
	 */
	size_t offset = 0;
	int ret = stream->ops->read(stream, buf, count, &offset);

	return offset ?: (ret ?: -EAGAIN);
}
/**
 * i915_perf_read - handles read() FOP for i915 perf stream FDs
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * The entry point for handling a read() on a stream file descriptor from
 * userspace. Most of the work is left to the i915_perf_read_locked() and
 * &i915_perf_stream_ops->read but to save having stream implementations (of
 * which we might have multiple later) we handle blocking read here.
 *
 * We can also consistently treat trying to read from a disabled stream
 * as an IO error so implementations can assume the stream is enabled
 * while reading.
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *ppos)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	ssize_t ret;

	/* To ensure it's handled consistently we simply treat all reads of a
	 * disabled stream as an error. In particular it might otherwise lead
	 * to a deadlock for blocking file descriptors...
	 */
	if (!stream->enabled)
		return -EIO;

	if (!(file->f_flags & O_NONBLOCK)) {
		/* There's the small chance of false positives from
		 * stream->ops->wait_unlocked.
		 *
		 * E.g. with single context filtering since we only wait until
		 * oabuffer has >= 1 report we don't immediately know whether
		 * any reports really belong to the current context
		 */
		do {
			ret = stream->ops->wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&dev_priv->perf.lock);
			ret = i915_perf_read_locked(stream, file,
						    buf, count, ppos);
			mutex_unlock(&dev_priv->perf.lock);
		} while (ret == -EAGAIN);
	} else {
		mutex_lock(&dev_priv->perf.lock);
		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
		mutex_unlock(&dev_priv->perf.lock);
	}

	/* We allow the poll checking to sometimes report false positive EPOLLIN
	 * events where we might actually report EAGAIN on read() if there's
	 * not really any data available. In this situation though we don't
	 * want to enter a busy loop between poll() reporting a EPOLLIN event
	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
	 * effectively ensures we back off until the next hrtimer callback
	 * before reporting another EPOLLIN event.
	 */
	if (ret >= 0 || ret == -EAGAIN) {
		/* Maybe make ->pollin per-stream state if we support multiple
		 * concurrent streams in the future.
		 */
		dev_priv->perf.oa.pollin = false;
	}

	return ret;
}
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
	struct drm_i915_private *dev_priv =
		container_of(hrtimer, typeof(*dev_priv),
			     perf.oa.poll_check_timer);

	if (oa_buffer_check_unlocked(dev_priv)) {
		dev_priv->perf.oa.pollin = true;
		wake_up(&dev_priv->perf.oa.poll_wq);
	}

	hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));

	return HRTIMER_RESTART;
}

/**
 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
 * @dev_priv: i915 device instance
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this calls through to
 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
 * will be woken for new stream data.
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
				      struct i915_perf_stream *stream,
				      struct file *file,
				      poll_table *wait)
{
	__poll_t events = 0;

	stream->ops->poll_wait(stream, file, wait);

	/* Note: we don't explicitly check whether there's something to read
	 * here since this path may be very hot depending on what else
	 * userspace is polling, or on the timeout in use. We rely solely on
	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
	 * samples to read.
	 */
	if (dev_priv->perf.oa.pollin)
		events |= EPOLLIN;

	return events;
}

/**
 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this ensures
 * poll_wait() gets called with a wait queue that will be woken for new stream
 * data.
 *
 * Note: Implementation deferred to i915_perf_poll_locked()
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	__poll_t ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}
/**
 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
 * @stream: A disabled i915 perf stream
 *
 * [Re]enables the associated capture of data for this stream.
 *
 * If a stream was previously enabled then there's currently no intention
 * to provide userspace any guarantee about the preservation of previously
 * buffered data.
 */
static void i915_perf_enable_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		return;

	/* Allow stream->ops->enable() to refer to this */
	stream->enabled = true;

	if (stream->ops->enable)
		stream->ops->enable(stream);
}

/**
 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
 * @stream: An enabled i915 perf stream
 *
 * Disables the associated capture of data for this stream.
 *
 * The intention is that disabling and re-enabling a stream will ideally be
 * cheaper than destroying and re-opening a stream with the same configuration,
 * though there are no formal guarantees about what state or buffered data
 * must be retained between disabling and re-enabling a stream.
 *
 * Note: while a stream is disabled it's considered an error for userspace
 * to attempt to read from the stream (-EIO).
 */
static void i915_perf_disable_locked(struct i915_perf_stream *stream)
{
	if (!stream->enabled)
		return;

	/* Allow stream->ops->disable() to refer to this */
	stream->enabled = false;

	if (stream->ops->disable)
		stream->ops->disable(stream);
}

/**
 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
 * @stream: An i915 perf stream
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
				   unsigned int cmd,
				   unsigned long arg)
{
	switch (cmd) {
	case I915_PERF_IOCTL_ENABLE:
		i915_perf_enable_locked(stream);
		return 0;
	case I915_PERF_IOCTL_DISABLE:
		i915_perf_disable_locked(stream);
		return 0;
	}

	return -EINVAL;
}
/**
 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
 * @file: An i915 perf stream file
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Implementation deferred to i915_perf_ioctl_locked().
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl(struct file *file,
			    unsigned int cmd,
			    unsigned long arg)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	long ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_ioctl_locked(stream, cmd, arg);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}

/**
 * i915_perf_destroy_locked - destroy an i915 perf stream
 * @stream: An i915 perf stream
 *
 * Frees all resources associated with the given i915 perf @stream, disabling
 * any associated data capture in the process.
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 */
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		i915_perf_disable_locked(stream);

	if (stream->ops->destroy)
		stream->ops->destroy(stream);

	list_del(&stream->link);

	if (stream->ctx)
		i915_gem_context_put(stream->ctx);

	kfree(stream);
}

/**
 * i915_perf_release - handles userspace close() of a stream file
 * @inode: anonymous inode associated with file
 * @file: An i915 perf stream file
 *
 * Cleans up any resources associated with an open i915 perf stream file.
 *
 * NB: close() can't really fail from the userspace point of view.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_perf_release(struct inode *inode, struct file *file)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;

	mutex_lock(&dev_priv->perf.lock);
	i915_perf_destroy_locked(stream);
	mutex_unlock(&dev_priv->perf.lock);

	return 0;
}

static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.release	= i915_perf_release,
	.poll		= i915_perf_poll,
	.read		= i915_perf_read,
	.unlocked_ioctl	= i915_perf_ioctl,
	/* Our ioctls have no arguments, so it's safe to use the same function
	 * to handle 32-bit compatibility.
	 */
	.compat_ioctl   = i915_perf_ioctl,
};
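
/*
 * Editor's sketch of the resulting userspace flow (hypothetical code, not
 * part of this file): a stream FD is obtained with DRM_IOCTL_I915_PERF_OPEN
 * and then driven through the fops above via read()/poll()/ioctl():
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4, // number of (key, value) pairs above
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * after which read(stream_fd, ...) fetches records and
 * ioctl(stream_fd, I915_PERF_IOCTL_DISABLE/ENABLE, 0) pauses/resumes capture.
 */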
/**
 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
 * @dev_priv: i915 device instance
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: individually validated u64 property value pairs
 * @file: drm file
 *
 * See i915_perf_open_ioctl() for interface details.
 *
 * Implements further stream config validation and stream initialization on
 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
 * taken to serialize with any non-file-operation driver hooks.
 *
 * Note: at this point the @props have only been validated in isolation and
 * it's still necessary to validate that the combination of properties makes
 * sense.
 *
 * In the case where userspace is interested in OA unit metrics then further
 * config validation and stream initialization details will be handled by
 * i915_oa_stream_init(). The code here should only validate config state that
 * will be relevant to all stream types / backends.
 *
 * Returns: zero on success or a negative error code.
 */
static int
i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
			    struct drm_i915_perf_open_param *param,
			    struct perf_open_properties *props,
			    struct drm_file *file)
{
	struct i915_gem_context *specific_ctx = NULL;
	struct i915_perf_stream *stream = NULL;
	unsigned long f_flags = 0;
	bool privileged_op = true;
	int stream_fd;
	int ret;

	if (props->single_context) {
		u32 ctx_handle = props->ctx_handle;
		struct drm_i915_file_private *file_priv = file->driver_priv;

		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
		if (!specific_ctx) {
			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
				  ctx_handle);
			ret = -ENOENT;
			goto err;
		}
	}

	/*
	 * On Haswell the OA unit supports clock gating off for a specific
	 * context and in this mode there's no visibility of metrics for the
	 * rest of the system, which we consider acceptable for a
	 * non-privileged client.
	 *
	 * For Gen8+ the OA unit no longer supports clock gating off for a
	 * specific context and the kernel can't securely stop the counters
	 * from updating as system-wide / global values. Even though we can
	 * filter reports based on the included context ID we can't block
	 * clients from seeing the raw / global counter values via
	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
	 * enable the OA unit by default.
	 */
	if (IS_HASWELL(dev_priv) && specific_ctx)
		privileged_op = false;

	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
	 * we check a dev.i915.perf_stream_paranoid sysctl option
	 * to determine if it's ok to access system wide OA counters
	 * without CAP_SYS_ADMIN privileges.
	 */
	if (privileged_op &&
	    i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
		ret = -EACCES;
		goto err_ctx;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto err_ctx;
	}

	stream->dev_priv = dev_priv;
	stream->ctx = specific_ctx;

	ret = i915_oa_stream_init(stream, param, props);
	if (ret)
		goto err_alloc;

	/* we avoid simply assigning stream->sample_flags = props->sample_flags
	 * to have _stream_init check the combination of sample flags more
	 * thoroughly, but still this is the expected result at this point.
	 */
	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
		ret = -ENODEV;
		goto err_flags;
	}

	list_add(&stream->link, &dev_priv->perf.streams);

	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
		f_flags |= O_NONBLOCK;

	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_open;
	}

	if (!(param->flags & I915_PERF_FLAG_DISABLED))
		i915_perf_enable_locked(stream);

	return stream_fd;

err_open:
	list_del(&stream->link);
err_flags:
	if (stream->ops->destroy)
		stream->ops->destroy(stream);
err_alloc:
	kfree(stream);
err_ctx:
	if (specific_ctx)
		i915_gem_context_put(specific_ctx);
err:
	return ret;
}
static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
	return div64_u64(1000000000ULL * (2ULL << exponent),
			 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
}
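
/*
 * Worked example: the OA unit emits a report every 2^(exponent + 1) timestamp
 * periods, hence the (2ULL << exponent) above. Taking the 160ns minimum HSW
 * sampling period quoted in the property validation below, Haswell's
 * timestamp frequency works out to 12500kHz, and exponent 0 gives
 * 1000000000 * 2 / (1000 * 12500) = 160ns; each increment of the exponent
 * doubles the period.
 */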
/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @dev_priv: i915 device instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties so
 * we shouldn't validate or assume anything about ordering here. This doesn't
 * rule out defining new properties with ordering requirements in the future.
 */
static int read_properties_unlocked(struct drm_i915_private *dev_priv,
				    u64 __user *uprops,
				    u32 n_props,
				    struct perf_open_properties *props)
{
	u64 __user *uprop = uprops;
	u32 i;

	memset(props, 0, sizeof(struct perf_open_properties));

	if (!n_props) {
		DRM_DEBUG("No i915 perf properties given\n");
		return -EINVAL;
	}

	/* Considering that ID = 0 is reserved and assuming that we don't
	 * (currently) expect any configurations to ever specify duplicate
	 * values for a particular property ID then the last _PROP_MAX value is
	 * one greater than the maximum number of properties we expect to get
	 * from userspace.
	 */
	if (n_props >= DRM_I915_PERF_PROP_MAX) {
		DRM_DEBUG("More i915 perf properties specified than exist\n");
		return -EINVAL;
	}

	for (i = 0; i < n_props; i++) {
		u64 oa_period, oa_freq_hz;
		u64 id, value;
		int ret;

		ret = get_user(id, uprop);
		if (ret)
			return ret;

		ret = get_user(value, uprop + 1);
		if (ret)
			return ret;

		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
			DRM_DEBUG("Unknown i915 perf property ID\n");
			return -EINVAL;
		}

		switch ((enum drm_i915_perf_property_id)id) {
		case DRM_I915_PERF_PROP_CTX_HANDLE:
			props->single_context = 1;
			props->ctx_handle = value;
			break;
		case DRM_I915_PERF_PROP_SAMPLE_OA:
			props->sample_flags |= SAMPLE_OA_REPORT;
			break;
		case DRM_I915_PERF_PROP_OA_METRICS_SET:
			if (value == 0) {
				DRM_DEBUG("Unknown OA metric set ID\n");
				return -EINVAL;
			}
			props->metrics_set = value;
			break;
		case DRM_I915_PERF_PROP_OA_FORMAT:
			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
				DRM_DEBUG("Out-of-range OA report format %llu\n",
					  value);
				return -EINVAL;
			}
			if (!dev_priv->perf.oa.oa_formats[value].size) {
				DRM_DEBUG("Unsupported OA report format %llu\n",
					  value);
				return -EINVAL;
			}
			props->oa_format = value;
			break;
		case DRM_I915_PERF_PROP_OA_EXPONENT:
			if (value > OA_EXPONENT_MAX) {
				DRM_DEBUG("OA timer exponent too high (> %u)\n",
					  OA_EXPONENT_MAX);
				return -EINVAL;
			}

			/* Theoretically we can program the OA unit to sample
			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
			 * for BXT. We don't allow such high sampling
			 * frequencies by default unless root.
			 */

			BUILD_BUG_ON(sizeof(oa_period) != 8);
			oa_period = oa_exponent_to_ns(dev_priv, value);

			/* This check is primarily to ensure that oa_period <=
			 * UINT32_MAX (before passing to do_div which only
			 * accepts a u32 denominator), but we can also skip
			 * checking anything < 1Hz which implicitly can't be
			 * limited via an integer oa_max_sample_rate.
			 */
			if (oa_period <= NSEC_PER_SEC) {
				u64 tmp = NSEC_PER_SEC;

				do_div(tmp, oa_period);
				oa_freq_hz = tmp;
			} else {
				oa_freq_hz = 0;
			}

			if (oa_freq_hz > i915_oa_max_sample_rate &&
			    !capable(CAP_SYS_ADMIN)) {
				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
					  i915_oa_max_sample_rate);
				return -EACCES;
			}

			props->oa_periodic = true;
			props->oa_period_exponent = value;
			break;
		case DRM_I915_PERF_PROP_MAX:
			MISSING_CASE(id);
			return -EINVAL;
		}

		uprop += 2;
	}

	return 0;
}
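
/*
 * Illustrative sketch of the layout this function consumes: a flat userspace
 * array of (ID, value) u64 pairs, in any order. The metrics set ID and
 * exponent values below are placeholders, not recommendations:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *
 * @n_props then counts pairs (4 here), not individual u64 elements.
 */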
/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 (key, value) property pairs.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep dependency with mmap_sem.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Return: A newly opened i915 Perf stream file descriptor or negative
 * error code on failure.
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_perf_open_param *param = data;
	struct perf_open_properties props;
	u32 known_open_flags;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
			   I915_PERF_FLAG_FD_NONBLOCK |
			   I915_PERF_FLAG_DISABLED;
	if (param->flags & ~known_open_flags) {
		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
		return -EINVAL;
	}

	ret = read_properties_unlocked(dev_priv,
				       u64_to_user_ptr(param->properties_ptr),
				       param->num_properties,
				       &props);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}
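
/*
 * Illustrative userspace sketch of issuing this ioctl, reusing the
 * properties[] array from the example above (drm_fd is assumed to be an open
 * render or primary node):
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / 16,	// pairs of u64s
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * Note the success case returns the new (positive) stream fd, not 0.
 */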
/**
 * i915_perf_register - exposes i915-perf to userspace
 * @dev_priv: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!dev_priv->perf.initialized)
		return;

	/* To be sure we're synchronized with an attempted
	 * i915_perf_open_ioctl(); considering that we're already exposed to
	 * userspace by the time we register.
	 */
	mutex_lock(&dev_priv->perf.lock);

	dev_priv->perf.metrics_kobj =
		kobject_create_and_add("metrics",
				       &dev_priv->drm.primary->kdev->kobj);
	if (!dev_priv->perf.metrics_kobj)
		goto exit;

	sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);

	if (IS_HASWELL(dev_priv)) {
		i915_perf_load_test_config_hsw(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		i915_perf_load_test_config_bdw(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		i915_perf_load_test_config_chv(dev_priv);
	} else if (IS_SKYLAKE(dev_priv)) {
		if (IS_SKL_GT2(dev_priv))
			i915_perf_load_test_config_sklgt2(dev_priv);
		else if (IS_SKL_GT3(dev_priv))
			i915_perf_load_test_config_sklgt3(dev_priv);
		else if (IS_SKL_GT4(dev_priv))
			i915_perf_load_test_config_sklgt4(dev_priv);
	} else if (IS_BROXTON(dev_priv)) {
		i915_perf_load_test_config_bxt(dev_priv);
	} else if (IS_KABYLAKE(dev_priv)) {
		if (IS_KBL_GT2(dev_priv))
			i915_perf_load_test_config_kblgt2(dev_priv);
		else if (IS_KBL_GT3(dev_priv))
			i915_perf_load_test_config_kblgt3(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv)) {
		i915_perf_load_test_config_glk(dev_priv);
	} else if (IS_COFFEELAKE(dev_priv)) {
		if (IS_CFL_GT2(dev_priv))
			i915_perf_load_test_config_cflgt2(dev_priv);
		else if (IS_CFL_GT3(dev_priv))
			i915_perf_load_test_config_cflgt3(dev_priv);
	} else if (IS_CANNONLAKE(dev_priv)) {
		i915_perf_load_test_config_cnl(dev_priv);
	}

	if (dev_priv->perf.oa.test_config.id == 0)
		goto sysfs_error;

	ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
				 &dev_priv->perf.oa.test_config.sysfs_metric);
	if (ret)
		goto sysfs_error;

	atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);

	goto exit;

sysfs_error:
	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;

exit:
	mutex_unlock(&dev_priv->perf.lock);
}
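
/*
 * Each advertised config then appears as metrics/<uuid>/id under the card's
 * sysfs directory. A hedged userspace sketch for mapping a uuid to the value
 * expected by DRM_I915_PERF_PROP_OA_METRICS_SET (the card0 path is an
 * assumption; the card index varies per system):
 *
 *	uint64_t metrics_set_id = 0;
 *	FILE *f = fopen("/sys/class/drm/card0/metrics/<uuid>/id", "r");
 *
 *	if (f) {
 *		fscanf(f, "%" SCNu64, &metrics_set_id);
 *		fclose(f);
 *	}
 */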
/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @dev_priv: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.metrics_kobj)
		return;

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &dev_priv->perf.oa.test_config.sysfs_metric);

	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;
}
static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
			return true;
	}
	return false;
}

static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
		addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
	       (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
		addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
	       (addr >= i915_mmio_reg_offset(OACEC0_0) &&
		addr <= i915_mmio_reg_offset(OACEC7_1));
}

static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
	       (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
		addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
}

static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
	       (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
		addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
}

static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen8_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
}

static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= 0x25100 && addr <= 0x2FF90) ||
	       (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
		addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
	       addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
}

static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= 0x182300 && addr <= 0x1823A4);
}
static u32 mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}
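
/*
 * For context: both of the above are "masked" registers, where
 * _MASKED_BIT_ENABLE(bit) expands to (bit << 16 | bit) -- the high half of a
 * write selects which bits may change and the low half carries the values.
 * Clearing both halves for a protected bit, as done here, means a
 * userspace-supplied value can neither set nor clear that bit; the hardware
 * ignores it entirely.
 */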
static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
					 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
		return ERR_PTR(-EFAULT);

	/* No is_valid function means we're not allowing any register to be programmed. */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(dev_priv, addr)) {
			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}
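
/*
 * Illustrative sketch of the userspace layout this function consumes: a flat
 * array of u32 (address, value) pairs, e.g. behind the mux_regs_ptr of an OA
 * config. The values below are placeholders, not a real configuration, and
 * every address must still pass the per-platform is_valid() check above:
 *
 *	uint32_t mux_regs[] = {
 *		0x9888, 0x00000000,	// (address, value)
 *		0x9888, 0x00000000,
 *	};
 *	// n_mux_regs = 2 pairs, mux_regs_ptr = (uintptr_t)mux_regs
 */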
static ssize_t show_dynamic_id(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;
	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(dev_priv->perf.metrics_kobj,
				  &oa_config->sysfs_metric);
}
/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open ioctl
 * or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	int err, id;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!dev_priv->perf.metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		DRM_DEBUG("No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		DRM_DEBUG("Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	atomic_set(&oa_config->ref_count, 1);

	if (!uuid_is_valid(args->uuid)) {
		DRM_DEBUG("Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config is
	 * allocated with kzalloc.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	oa_config->mux_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_mux_reg,
			      u64_to_user_ptr(args->mux_regs_ptr),
			      args->n_mux_regs);

	if (IS_ERR(oa_config->mux_regs)) {
		DRM_DEBUG("Failed to create OA config for mux_regs\n");
		err = PTR_ERR(oa_config->mux_regs);
		goto reg_err;
	}

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	oa_config->b_counter_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_b_counter_reg,
			      u64_to_user_ptr(args->boolean_regs_ptr),
			      args->n_boolean_regs);

	if (IS_ERR(oa_config->b_counter_regs)) {
		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(oa_config->b_counter_regs);
		goto reg_err;
	}

	if (INTEL_GEN(dev_priv) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		oa_config->flex_regs =
			alloc_oa_regs(dev_priv,
				      dev_priv->perf.oa.ops.is_valid_flex_reg,
				      u64_to_user_ptr(args->flex_regs_ptr),
				      args->n_flex_regs);

		if (IS_ERR(oa_config->flex_regs)) {
			DRM_DEBUG("Failed to create OA config for flex_regs\n");
			err = PTR_ERR(oa_config->flex_regs);
			goto reg_err;
		}
	}

	err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
	 */
	idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			DRM_DEBUG("OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
	if (err) {
		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 is for the kernel stored test config. */
	oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		DRM_DEBUG("Failed to allocate an ID for the OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return oa_config->id;

sysfs_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
reg_err:
	put_oa_config(dev_priv, oa_config);
	DRM_DEBUG("Failed to add new OA config\n");
	return err;
}
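
/*
 * Illustrative userspace sketch (hypothetical uuid and register array; real
 * configs normally come from a metrics generator tool):
 *
 *	struct drm_i915_perf_oa_config config = {
 *		.n_mux_regs = sizeof(mux_regs) / sizeof(uint32_t) / 2,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	uint64_t config_id;
 *
 *	memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
 *	       sizeof(config.uuid));
 *	config_id = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *
 * The returned id is what DRM_I915_PERF_PROP_OA_METRICS_SET expects, and the
 * config also becomes visible by uuid under the sysfs metrics/ directory.
 */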
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		goto lock_err;

	oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto config_err;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &oa_config->sysfs_metric);

	idr_remove(&dev_priv->perf.metrics_idr, *arg);
	put_oa_config(dev_priv, oa_config);

config_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
	return ret;
}
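
/*
 * Illustrative userspace sketch, reusing the config_id returned by the add
 * ioctl in the example above:
 *
 *	uint64_t id = config_id;
 *
 *	drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 *
 * As noted above, a stream still using the config holds a reference, so
 * removal only unpublishes it; the contents are freed on last close.
 */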
static struct ctl_table oa_table[] = {
	{
	 .procname = "perf_stream_paranoid",
	 .data = &i915_perf_stream_paranoid,
	 .maxlen = sizeof(i915_perf_stream_paranoid),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &zero,
	 .extra2 = &one,
	 },
	{
	 .procname = "oa_max_sample_rate",
	 .data = &i915_oa_max_sample_rate,
	 .maxlen = sizeof(i915_oa_max_sample_rate),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &zero,
	 .extra2 = &oa_sample_rate_hard_limit,
	 },
	{}
};

static struct ctl_table i915_root[] = {
	{
	 .procname = "i915",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = oa_table,
	 },
	{}
};

static struct ctl_table dev_root[] = {
	{
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = i915_root,
	 },
	{}
};
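
/*
 * The nesting above (dev -> i915 -> oa_table) publishes these knobs as
 * /proc/sys/dev/i915/perf_stream_paranoid and
 * /proc/sys/dev/i915/oa_max_sample_rate once the table is registered in
 * i915_perf_init(); e.g. writing 0 to perf_stream_paranoid (as root) lets
 * unprivileged clients open system-wide streams.
 */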
/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase, with i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv)) {
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			hsw_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
		dev_priv->perf.oa.ops.read = gen7_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read =
			gen7_oa_hw_tail_read;

		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
	} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming, did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
		dev_priv->perf.oa.ops.read = gen8_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

		if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(dev_priv)) {
				dev_priv->perf.oa.ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;

			if (IS_GEN8(dev_priv)) {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
			} else {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
			}
		} else if (IS_GEN10(dev_priv)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen10_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;

			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
		}
	}

	if (dev_priv->perf.oa.ops.enable_metric_set) {
		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

		INIT_LIST_HEAD(&dev_priv->perf.streams);
		mutex_init(&dev_priv->perf.lock);
		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

		oa_sample_rate_hard_limit = 1000 *
			(INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

		mutex_init(&dev_priv->perf.metrics_lock);
		idr_init(&dev_priv->perf.metrics_idr);

		dev_priv->perf.initialized = true;
	}
}
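
/*
 * Worked example for the hard limit set above: it caps OA sampling at half
 * the command stream timestamp frequency, converted from kHz to Hz. With the
 * 12500kHz Haswell timestamp frequency from the oa_exponent_to_ns() example,
 * that gives 1000 * (12500 / 2) = 6250000, i.e. a 6.25MHz ceiling for the
 * dev.i915.oa_max_sample_rate sysctl.
 */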
static int destroy_config(int id, void *p, void *data)
{
	struct drm_i915_private *dev_priv = data;
	struct i915_oa_config *oa_config = p;

	put_oa_config(dev_priv, oa_config);

	return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.initialized)
		return;

	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
	idr_destroy(&dev_priv->perf.metrics_idr);

	unregister_sysctl_table(dev_priv->perf.sysctl_header);

	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

	dev_priv->perf.initialized = false;
}