i915_perf.c

/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */
/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seems to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */
#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
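
/*
 * Worked example (illustrative only): with OA_BUFFER_SIZE = 16M the mask
 * is 0xffffff, so OA_TAKEN() handles tail wraparound naturally:
 *
 *   head = 0xffff80, tail = 0x000100 (tail has wrapped past the end)
 *   OA_TAKEN(0x100, 0xffff80) = (0x100 - 0xffff80) & 0xffffff
 *                             = 0x180 = 384 bytes available
 */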
/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is also used for delivering POLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports(); a simplified model
 * follows the defines below.
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff
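
/*
 * Minimal sketch (not driver code) of the two-tail aging scheme described
 * above, with hypothetical names; the real state lives in
 * dev_priv->perf.oa.oa_buffer and is updated under ptr_lock in
 * oa_buffer_check_unlocked() below, which also throttles step 2 until at
 * least one new report's worth of data is available.
 */
#if 0
struct tail_aging_model {
	u32 tails[2];		/* [aged_idx] readable; [!aged_idx] aging */
	unsigned int aged_idx;
	u64 aging_timestamp;	/* when tails[!aged_idx] was last sampled */
};

static void age_tails(struct tail_aging_model *m, u32 hw_tail, u64 now)
{
	/* 1) Promote the aging tail once it has aged long enough... */
	if (m->tails[!m->aged_idx] != INVALID_TAIL_PTR &&
	    (now - m->aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		m->aged_idx ^= 1;
		m->tails[!m->aged_idx] = INVALID_TAIL_PTR;
	}

	/* 2) ...then start aging a newly read HW tail if needed. */
	if (m->tails[!m->aged_idx] == INVALID_TAIL_PTR) {
		m->tails[!m->aged_idx] = hw_tail;
		m->aging_timestamp = now;
	}
}
#endif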
/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer: 200Hz, i.e. a 5ms poll period.
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;
/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
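
/*
 * For illustration (a sketch, not driver code -- the driver derives this
 * from the platform's timestamp frequency later in this file): assuming an
 * 80ns timestamp period (12.5MHz, as on Haswell), the sampling period
 * selected by an exponent is
 *
 *   period_ns = 80 * 2^(exponent + 1)
 *
 * so exponent 0 gives 160ns and OA_EXPONENT_MAX (31) gives ~343 seconds.
 */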
#define INVALID_CTX_ID 0xffffffff
/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK		0x3f
#define OAREPORT_REASON_SHIFT		19
#define OAREPORT_REASON_TIMER		(1<<0)
#define OAREPORT_REASON_CTX_SWITCH	(1<<3)
#define OAREPORT_REASON_CLK_RATIO	(1<<5)
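
/*
 * Example decode (mirroring gen8_append_oa_reports() below), where
 * report32 points at the start of a report:
 *
 *   u32 reason = (report32[0] >> OAREPORT_REASON_SHIFT) &
 *		  OAREPORT_REASON_MASK;
 *   if (reason & OAREPORT_REASON_CTX_SWITCH)
 *	   ... the report was triggered by a context switch ...
 */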
/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;
/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

#define SAMPLE_OA_REPORT	(1<<0)
/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};
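
/*
 * Illustrative userspace sketch (not driver code) of the u64 (key, value)
 * property pairs that get validated into this struct; metrics_set_id (read
 * from sysfs) and drm_fd are assumed to exist, and error handling is
 * omitted:
 */
#if 0
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};
	int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
#endif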
static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	/* The register lists may hold ERR_PTR values from a failed
	 * allocation/copy, so only kfree() successfully allocated lists
	 * (kfree(NULL) is a no-op).
	 */
	if (!IS_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!IS_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!IS_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}
static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.oa.test_config;
		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}
static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}
/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and whether
 * there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset) if there is currently
	 * a read() in progress.
	 */
	head = dev_priv->perf.oa.oa_buffer.head;

	aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
		false : OA_TAKEN(aged_tail, head) >= report_size;
}
/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}
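
/*
 * Illustrative userspace sketch of consuming these records from a stream
 * fd (hypothetical buffer size, error handling omitted):
 */
#if 0
	uint8_t data[4096];
	ssize_t len = read(stream_fd, data, sizeof(data));
	size_t pos = 0;

	while (pos + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
		const struct drm_i915_perf_record_header *header =
			(const void *)(data + pos);

		if (header->type == DRM_I915_PERF_RECORD_SAMPLE) {
			/* the raw OA report follows the header when
			 * DRM_I915_PERF_PROP_SAMPLE_OA was requested
			 */
		}

		pos += header->size;
	}
#endif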
/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		/*
		 * XXX: Just keep the lower 21 bits for now since I'm not
		 * entirely sure if the HW touches any of the higher bits in
		 * this field
		 */
		ctx_id = report32[2] & 0x1fffff;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
		     dev_priv->perf.oa.specific_ctx_id) ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.oa.exclusive_stream->ctx &&
			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}
/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}
/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
  840. static int gen7_append_oa_reports(struct i915_perf_stream *stream,
  841. char __user *buf,
  842. size_t count,
  843. size_t *offset)
  844. {
  845. struct drm_i915_private *dev_priv = stream->dev_priv;
  846. int report_size = dev_priv->perf.oa.oa_buffer.format_size;
  847. u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;

	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN7_OASTATUS2,
			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
			    OA_MEM_SELECT_GGTT));
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

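/*
 * Worked example for the copy loop above (illustrative only, assuming
 * OA_TAKEN() is the usual power-of-two modular distance
 * (tail - head) & (OA_BUFFER_SIZE - 1)): with a 16MB buffer and
 * 256 byte reports, if head = 0xffff00 and the aged tail has wrapped
 * to 0x000100 then
 *
 *   taken = (0x000100 - 0xffff00) & 0xffffff = 0x200
 *
 * i.e. two reports are available, one ending at the top of the buffer
 * and one at the bottom, which is why head is advanced with
 * head = (head + report_size) & mask rather than plain addition.
 */
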
/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.oa.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

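/*
 * Illustrative note on the latching above (restating the code, not new
 * driver logic): the first read that observes GEN7_OASTATUS1_REPORT_LOST
 * appends a _RECORD_OA_REPORT_LOST record and sets the bit in
 * gen7_latched_oastatus1; every later read masks that bit out of the
 * freshly read OASTATUS1, so userspace sees the condition reported once
 * rather than on every read, given that the bit can't safely be cleared
 * in the register itself while the OA unit is enabled.
 */
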
/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!dev_priv->perf.oa.periodic)
		return -EIO;

	return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
					oa_buffer_check_unlocked(dev_priv));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (i915.enable_execlists)
		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
	else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];
		struct intel_ring *ring;
		int ret;

		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 *
		 * NB: implied RCS engine...
		 */
		ring = engine->context_pin(engine, stream->ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (IS_ERR(ring))
			return PTR_ERR(ring);

		/*
		 * Explicitly track the ID (instead of calling
		 * i915_ggtt_offset() on the fly) considering the difference
		 * with gen8+ and execlists
		 */
		dev_priv->perf.oa.specific_ctx_id =
			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
	}

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id; releases the hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (i915.enable_execlists) {
		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];

		mutex_lock(&dev_priv->drm.struct_mutex);

		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
		engine->context_unpin(engine, stream->ctx);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}
}

static void
free_oa_buffer(struct drm_i915_private *i915)
{
	mutex_lock(&i915->drm.struct_mutex);

	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

	i915->perf.oa.oa_buffer.vma = NULL;
	i915->perf.oa.oa_buffer.vaddr = NULL;

	mutex_unlock(&i915->drm.struct_mutex);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	dev_priv->perf.oa.exclusive_stream = NULL;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);

	free_oa_buffer(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	put_oa_config(dev_priv, stream->oa_config);

	if (dev_priv->perf.oa.spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 dev_priv->perf.oa.spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN7_OABUFFER, gtt_offset);

	I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/* Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}

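/*
 * Aside (illustrative, based on how the tail pointers are consumed in
 * gen7_append_oa_reports() above): the two tails[] slots implement a
 * simple aging scheme. A tail value read from the hardware is only
 * trusted once it has been stable for a while, since report writes can
 * land in memory some time after the tail register advances; marking
 * both slots INVALID_TAIL_PTR here forces the poll hrtimer callback to
 * re-derive a safe, aged tail before any reports are forwarded.
 */
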
static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	I915_WRITE(GEN8_OASTATUS, 0);
	I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	I915_WRITE(GEN8_OABUFFER, gtt_offset |
		   OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
	I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/*
	 * Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}

static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		return ret;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		DRM_ERROR("Failed to allocate OA buffer\n");
		ret = PTR_ERR(bo);
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}
	dev_priv->perf.oa.oa_buffer.vma = vma;

	dev_priv->perf.oa.oa_buffer.vaddr =
		i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
		ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
		goto err_unpin;
	}

	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);

	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
			 dev_priv->perf.oa.oa_buffer.vaddr);

	goto unlock;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	dev_priv->perf.oa.oa_buffer.vaddr = NULL;
	dev_priv->perf.oa.oa_buffer.vma = NULL;

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

static void config_oa_regs(struct drm_i915_private *dev_priv,
			   const struct i915_oa_reg *regs,
			   u32 n_regs)
{
	u32 i;

	for (i = 0; i < n_regs; i++) {
		const struct i915_oa_reg *reg = regs + i;

		I915_WRITE(reg->addr, reg->value);
	}
}

static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
				 const struct i915_oa_config *oa_config)
{
	/* PRM:
	 *
	 * OA unit is using “crclk” for its functionality. When trunk
	 * level clock gating takes place, OA clock would be gated,
	 * unable to count the events from non-render clock domain.
	 * Render clock gating must be disabled when OA is enabled to
	 * count the events from non-render domain. Unit level clock
	 * gating for RCS should also be disabled.
	 */
	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
				    ~GEN7_DOP_CLOCK_GATE_ENABLE));
	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));

	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

	/* It apparently takes a fairly long time for a new MUX
	 * configuration to be applied after these register writes.
	 * This delay duration was derived empirically based on the
	 * render_basic config but hopefully it covers the maximum
	 * configuration latency.
	 *
	 * As a fallback, the checks in _append_oa_reports() to skip
	 * invalid OA reports do also seem to work to discard reports
	 * generated before this config has completed - albeit not
	 * silently.
	 *
	 * Unfortunately this is essentially a magic number, since we
	 * don't currently know of a reliable mechanism for predicting
	 * how long the MUX config will take to apply and besides
	 * seeing invalid reports we don't know of a reliable way to
	 * explicitly check that the MUX config has landed.
	 *
	 * It's even possible we've mischaracterized the underlying
	 * problem - it just seems like the simplest explanation for
	 * why a delay at this location would mitigate any invalid
	 * reports.
	 */
	usleep_range(15000, 20000);

	config_oa_regs(dev_priv, oa_config->b_counter_regs,
		       oa_config->b_counter_regs_len);

	return 0;
}

static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
				  ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
				    GEN7_DOP_CLOCK_GATE_ENABLE));

	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
				      ~GT_NOA_ENABLE));
}

/*
 * NB: It must always remain pointer safe to run this even if the OA unit
 * has been disabled.
 *
 * It's fine to put out-of-date values into these per-context registers
 * in the case that the OA unit has been disabled.
 */
static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
					   u32 *reg_state,
					   const struct i915_oa_config *oa_config)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};
	int i;

	reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
	reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
				      GEN8_OA_TIMER_PERIOD_SHIFT) |
				     (dev_priv->perf.oa.periodic ?
				      GEN8_OA_TIMER_ENABLE : 0) |
				     GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
		u32 state_offset = ctx_flexeu0 + i * 2;
		u32 mmio = flex_mmio[i];

		/*
		 * This arbitrary default will select the 'EU FPU0 Pipeline
		 * Active' event. In the future it's anticipated that there
		 * will be an explicit 'No Event' we can select, but not yet...
		 */
		u32 value = 0;

		if (oa_config) {
			u32 j;

			for (j = 0; j < oa_config->flex_regs_len; j++) {
				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
					value = oa_config->flex_regs[j].value;
					break;
				}
			}
		}

		reg_state[state_offset] = mmio;
		reg_state[state_offset+1] = value;
	}
}

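/*
 * Illustrative layout note (names per the code above): the register
 * state context stores (mmio offset, value) pairs, so with ctx_flexeu0
 * as the dword index of the first Flex EU pair, the i'th flex register
 * lands at:
 *
 *   reg_state[ctx_flexeu0 + i * 2]     = flex_mmio[i];
 *   reg_state[ctx_flexeu0 + i * 2 + 1] = value;
 *
 * e.g. i = 0 programs EU_PERF_CNTL0 and i = 6 programs EU_PERF_CNTL6,
 * which is how a non-contiguous MMIO range still packs contiguously
 * into the context image.
 */
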
/*
 * Same as gen8_update_reg_state_unlocked, only through the batchbuffer. This
 * is only used by the kernel context.
 */
static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
			       const struct i915_oa_config *oa_config)
{
	struct drm_i915_private *dev_priv = req->i915;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	u32 flex_mmio[] = {
		i915_mmio_reg_offset(EU_PERF_CNTL0),
		i915_mmio_reg_offset(EU_PERF_CNTL1),
		i915_mmio_reg_offset(EU_PERF_CNTL2),
		i915_mmio_reg_offset(EU_PERF_CNTL3),
		i915_mmio_reg_offset(EU_PERF_CNTL4),
		i915_mmio_reg_offset(EU_PERF_CNTL5),
		i915_mmio_reg_offset(EU_PERF_CNTL6),
	};
	u32 *cs;
	int i;

	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);

	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
		u32 mmio = flex_mmio[i];

		/*
		 * This arbitrary default will select the 'EU FPU0 Pipeline
		 * Active' event. In the future it's anticipated that there
		 * will be an explicit 'No Event' we can select, but not
		 * yet...
		 */
		u32 value = 0;

		if (oa_config) {
			u32 j;

			for (j = 0; j < oa_config->flex_regs_len; j++) {
				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
					value = oa_config->flex_regs[j].value;
					break;
				}
			}
		}

		*cs++ = mmio;
		*cs++ = value;
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

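/*
 * Worked dword budget for the intel_ring_begin() above (derived from the
 * emits in gen8_emit_oa_config()): one MI_LOAD_REGISTER_IMM header, one
 * (offset, value) pair for GEN8_OACTXCONTROL, seven pairs for the Flex
 * EU registers and a trailing MI_NOOP:
 *
 *   1 + 2 + 7 * 2 + 1 = 18 = ARRAY_SIZE(flex_mmio) * 2 + 4
 *
 * and the LRI count of ARRAY_SIZE(flex_mmio) + 1 = 8 matches the eight
 * register writes that follow the header.
 */
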
static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
						 const struct i915_oa_config *oa_config)
{
	struct intel_engine_cs *engine = dev_priv->engine[RCS];
	struct i915_gem_timeline *timeline;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = gen8_emit_oa_config(req, oa_config);
	if (ret) {
		i915_add_request(req);
		return ret;
	}

	/* Queue this switch after all other activity */
	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
		struct drm_i915_gem_request *prev;
		struct intel_timeline *tl;

		tl = &timeline->engine[engine->id];
		prev = i915_gem_active_raw(&tl->last_request,
					   &dev_priv->drm.struct_mutex);
		if (prev)
			i915_sw_fence_await_sw_fence_gfp(&req->submit,
							 &prev->submit,
							 GFP_KERNEL);
	}

	ret = i915_switch_context(req);
	i915_add_request(req);

	return ret;
}

/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 */
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
				       const struct i915_oa_config *oa_config,
				       bool interruptible)
{
	struct i915_gem_context *ctx;
	int ret;
	unsigned int wait_flags = I915_WAIT_LOCKED;

	if (interruptible) {
		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		wait_flags |= I915_WAIT_INTERRUPTIBLE;
	} else {
		mutex_lock(&dev_priv->drm.struct_mutex);
	}

	/* Switch away from any user context. */
	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
	if (ret)
		goto out;

	/*
	 * The OA register config is set up through the context image. This
	 * image might be written to by the GPU on context switch (in
	 * particular on lite-restore). This means we can't safely update a
	 * context's image, if this context is scheduled/submitted to run on
	 * the GPU.
	 *
	 * We could emit the OA register config through the batch buffer but
	 * this might leave a small interval of time where the OA unit is
	 * configured at an invalid sampling period.
	 *
	 * So far the best way to work around this issue seems to be draining
	 * the GPU from any submitted work.
	 */
	ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
	if (ret)
		goto out;

	/* Update all contexts now that we've stalled the submission. */
	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct intel_context *ce = &ctx->engine[RCS];
		u32 *regs;

		/* OA settings will be set upon first use */
		if (!ce->state)
			continue;

		regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
		if (IS_ERR(regs)) {
			ret = PTR_ERR(regs);
			goto out;
		}

		ce->state->obj->mm.dirty = true;
		regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);

		gen8_update_reg_state_unlocked(ctx, regs, oa_config);

		i915_gem_object_unpin_map(ce->state->obj);
	}

out:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

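/*
 * Summary of the sequence above (restating the code, for orientation):
 * 1) emit the new config from the kernel context so any user context is
 *    switched away from,
 * 2) drain the GPU with i915_gem_wait_for_idle() so no context image can
 *    be written by a context switch while we edit it,
 * 3) rewrite the OA portion of every pinned context image directly.
 * New contexts pick the state up via i915_oa_init_reg_state() instead.
 */
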
static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
				  const struct i915_oa_config *oa_config)
{
	int ret;

	/*
	 * We disable slice/unslice clock ratio change reports on SKL since
	 * they are too noisy. The HW generates a lot of redundant reports
	 * where the ratio hasn't really changed causing a lot of redundant
	 * work to processes and increasing the chances we'll hit buffer
	 * overruns.
	 *
	 * Although we don't currently use the 'disable overrun' OABUFFER
	 * feature it's worth noting that clock ratio reports have to be
	 * disabled before considering using that feature since the HW doesn't
	 * correctly block these reports.
	 *
	 * Currently none of the high-level metrics we have depend on knowing
	 * this ratio to normalize.
	 *
	 * Note: This register is not power context saved and restored, but
	 * that's OK considering that we disable RC6 while the OA unit is
	 * enabled.
	 *
	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
	 * be read back from automatically triggered reports, as part of the
	 * RPT_ID field.
	 */
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	    IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
		I915_WRITE(GEN8_OA_DEBUG,
			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
	}

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
	if (ret)
		return ret;

	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

	config_oa_regs(dev_priv, oa_config->b_counter_regs,
		       oa_config->b_counter_regs_len);

	return 0;
}

static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
	/* Reset all contexts' slices/subslices configurations. */
	gen8_configure_all_contexts(dev_priv, NULL, false);

	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
				      ~GT_NOA_ENABLE));
}

static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
	 */
	gen7_init_oa_buffer(dev_priv);

	if (dev_priv->perf.oa.exclusive_stream->enabled) {
		struct i915_gem_context *ctx =
			dev_priv->perf.oa.exclusive_stream->ctx;
		u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;

		bool periodic = dev_priv->perf.oa.periodic;
		u32 period_exponent = dev_priv->perf.oa.period_exponent;
		u32 report_format = dev_priv->perf.oa.oa_buffer.format;

		I915_WRITE(GEN7_OACONTROL,
			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
			   (period_exponent <<
			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
			   GEN7_OACONTROL_ENABLE);
	} else
		I915_WRITE(GEN7_OACONTROL, 0);
}

static void gen8_oa_enable(struct drm_i915_private *dev_priv)
{
	u32 report_format = dev_priv->perf.oa.oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
	 */
	gen8_init_oa_buffer(dev_priv);

	/*
	 * Note: we don't rely on the hardware to perform single context
	 * filtering and instead filter on the cpu based on the context-id
	 * field of reports
	 */
	I915_WRITE(GEN8_OACONTROL, (report_format <<
				    GEN8_OA_REPORT_FORMAT_SHIFT) |
				   GEN8_OA_COUNTER_ENABLE);
}

/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
 */
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	dev_priv->perf.oa.ops.oa_enable(dev_priv);

	if (dev_priv->perf.oa.periodic)
		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
			      ns_to_ktime(POLL_PERIOD),
			      HRTIMER_MODE_REL_PINNED);
}

static void gen7_oa_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN7_OACONTROL, 0);
}

static void gen8_oa_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN8_OACONTROL, 0);
}

/**
 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * Stops the OA unit from periodically writing counter reports into the
 * circular OA buffer. This also stops the hrtimer that periodically checks for
 * data in the circular OA buffer, for notifying userspace.
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	dev_priv->perf.oa.ops.oa_disable(dev_priv);

	if (dev_priv->perf.oa.periodic)
		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
}

static const struct i915_perf_stream_ops i915_oa_stream_ops = {
	.destroy = i915_oa_stream_destroy,
	.enable = i915_oa_stream_enable,
	.disable = i915_oa_stream_disable,
	.wait_unlocked = i915_oa_wait_unlocked,
	.poll_wait = i915_oa_poll_wait,
	.read = i915_oa_read,
};

/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but we still need to further validate that the combined
 * properties are OK.
 *
 * If the configuration makes sense then we can allocate memory for
 * a circular OA buffer and apply the requested metric set configuration.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_oa_stream_init(struct i915_perf_stream *stream,
			       struct drm_i915_perf_open_param *param,
			       struct perf_open_properties *props)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int format_size;
	int ret;

	/* If the sysfs metrics/ directory wasn't registered for some
	 * reason then don't let userspace try their luck with config
	 * IDs
	 */
	if (!dev_priv->perf.metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
		DRM_DEBUG("Only OA report sampling supported\n");
		return -EINVAL;
	}

	if (!dev_priv->perf.oa.ops.init_oa_buffer) {
		DRM_DEBUG("OA unit not supported\n");
		return -ENODEV;
	}

	/* To avoid the complexity of having to accurately filter
	 * counter reports and marshal to the appropriate client
	 * we currently only allow exclusive access
	 */
	if (dev_priv->perf.oa.exclusive_stream) {
		DRM_DEBUG("OA unit already in use\n");
		return -EBUSY;
	}

	if (!props->oa_format) {
		DRM_DEBUG("OA report format not specified\n");
		return -EINVAL;
	}

	/* We set up some ratelimit state to potentially throttle any _NOTES
	 * about spurious, invalid OA reports which we don't forward to
	 * userspace.
	 *
	 * The initialization is associated with opening the stream (not driver
	 * init) considering we print a _NOTE about any throttling when closing
	 * the stream instead of waiting until driver _fini which no one would
	 * ever see.
	 *
	 * Using the same limiting factors as printk_ratelimit()
	 */
	ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
			     5 * HZ, 10);
	/* Since we use a DRM_NOTE for spurious reports it would be
	 * inconsistent to let __ratelimit() automatically print a warning for
	 * throttling.
	 */
	ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
			    RATELIMIT_MSG_ON_RELEASE);

	stream->sample_size = sizeof(struct drm_i915_perf_record_header);

	format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;

	stream->sample_flags |= SAMPLE_OA_REPORT;
	stream->sample_size += format_size;

	dev_priv->perf.oa.oa_buffer.format_size = format_size;
	if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
		return -EINVAL;

	dev_priv->perf.oa.oa_buffer.format =
		dev_priv->perf.oa.oa_formats[props->oa_format].format;

	dev_priv->perf.oa.periodic = props->oa_periodic;
	if (dev_priv->perf.oa.periodic)
		dev_priv->perf.oa.period_exponent = props->oa_period_exponent;

	if (stream->ctx) {
		ret = oa_get_render_ctx_id(stream);
		if (ret)
			return ret;
	}

	ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
	if (ret)
		goto err_config;

	/* PRM - observability performance counters:
	 *
	 *   OACONTROL, performance counter enable, note:
	 *
	 *   "When this bit is set, in order to have coherent counts,
	 *   RC6 power state and trunk clock gating must be disabled.
	 *   This can be achieved by programming MMIO registers as
	 *   0xA094=0 and 0xA090[31]=1"
	 *
	 * In our case we are expecting that taking pm + FORCEWAKE
	 * references will effectively disable RC6.
	 */
	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = alloc_oa_buffer(dev_priv);
	if (ret)
		goto err_oa_buf_alloc;

	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						      stream->oa_config);
	if (ret)
		goto err_enable;

	stream->ops = &i915_oa_stream_ops;

	/* Lock device for exclusive_stream access late because
	 * enable_metric_set() might lock as well on gen8+.
	 */
	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		goto err_lock;

	dev_priv->perf.oa.exclusive_stream = stream;

	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

err_lock:
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);

err_enable:
	free_oa_buffer(dev_priv);

err_oa_buf_alloc:
	put_oa_config(dev_priv, stream->oa_config);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

err_config:
	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	return ret;
}

void i915_oa_init_reg_state(struct intel_engine_cs *engine,
			    struct i915_gem_context *ctx,
			    u32 *reg_state)
{
	struct i915_perf_stream *stream;

	if (engine->id != RCS)
		return;

	stream = engine->i915->perf.oa.exclusive_stream;
	if (stream)
		gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
}

/**
 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
 * ensure that if we've successfully copied any data then reporting that takes
 * precedence over any internal error status, so the data isn't lost.
 *
 * For example ret will be -ENOSPC whenever there is more buffered data than
 * can be copied to userspace, but that's only interesting if we weren't able
 * to copy some data because it implies the userspace buffer is too small to
 * receive a single record (and we never split records).
 *
 * Another case with ret == -EFAULT is more of a grey area since it would seem
 * like bad form for userspace to ask us to overrun its buffer, but the user
 * knows best:
 *
 *   http://yarchive.net/comp/linux/partial_reads_writes.html
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
				     struct file *file,
				     char __user *buf,
				     size_t count,
				     loff_t *ppos)
{
	/* Note we keep the offset (aka bytes read) separate from any
	 * error status so that the final check for whether we return
	 * the bytes read with a higher precedence than any error (see
	 * comment below) doesn't need to be handled/duplicated in
	 * stream->ops->read() implementations.
	 */
	size_t offset = 0;
	int ret = stream->ops->read(stream, buf, count, &offset);

	return offset ?: (ret ?: -EAGAIN);
}

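/*
 * Illustrative outcomes of the "offset ?: (ret ?: -EAGAIN)" above:
 * - some records copied, then the buffer fills: offset > 0 with
 *   ret == -ENOSPC, and the bytes read are returned so nothing is lost;
 * - nothing copied and no error (stream empty): 0/0 becomes -EAGAIN so
 *   blocking readers go back to waiting;
 * - nothing copied and a real failure: ret (e.g. -EFAULT) is returned.
 */
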
/**
 * i915_perf_read - handles read() FOP for i915 perf stream FDs
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * The entry point for handling a read() on a stream file descriptor from
 * userspace. Most of the work is left to the i915_perf_read_locked() and
 * &i915_perf_stream_ops->read, but we handle blocking reads here to save
 * stream implementations (of which we might have multiple later) from each
 * having to do so.
 *
 * We can also consistently treat trying to read from a disabled stream
 * as an IO error so implementations can assume the stream is enabled
 * while reading.
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *ppos)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	ssize_t ret;

	/* To ensure it's handled consistently we simply treat all reads of a
	 * disabled stream as an error. In particular it might otherwise lead
	 * to a deadlock for blocking file descriptors...
	 */
	if (!stream->enabled)
		return -EIO;

	if (!(file->f_flags & O_NONBLOCK)) {
		/* There's the small chance of false positives from
		 * stream->ops->wait_unlocked.
		 *
		 * E.g. with single context filtering since we only wait until
		 * oabuffer has >= 1 report we don't immediately know whether
		 * any reports really belong to the current context
		 */
		do {
			ret = stream->ops->wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&dev_priv->perf.lock);
			ret = i915_perf_read_locked(stream, file,
						    buf, count, ppos);
			mutex_unlock(&dev_priv->perf.lock);
		} while (ret == -EAGAIN);
	} else {
		mutex_lock(&dev_priv->perf.lock);
		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
		mutex_unlock(&dev_priv->perf.lock);
	}

	/* We allow the poll checking to sometimes report false positive POLLIN
	 * events where we might actually report EAGAIN on read() if there's
	 * not really any data available. In this situation though we don't
	 * want to enter a busy loop between poll() reporting a POLLIN event
	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
	 * effectively ensures we back off until the next hrtimer callback
	 * before reporting another POLLIN event.
	 */
	if (ret >= 0 || ret == -EAGAIN) {
		/* Maybe make ->pollin per-stream state if we support multiple
		 * concurrent streams in the future.
		 */
		dev_priv->perf.oa.pollin = false;
	}

	return ret;
}

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
	struct drm_i915_private *dev_priv =
		container_of(hrtimer, typeof(*dev_priv),
			     perf.oa.poll_check_timer);

	if (oa_buffer_check_unlocked(dev_priv)) {
		dev_priv->perf.oa.pollin = true;
		wake_up(&dev_priv->perf.oa.poll_wq);
	}

	hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));

	return HRTIMER_RESTART;
}

/**
 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
 * @dev_priv: i915 device instance
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this calls through to
 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
 * will be woken for new stream data.
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: any poll events that are ready without sleeping
 */
static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
					  struct i915_perf_stream *stream,
					  struct file *file,
					  poll_table *wait)
{
	unsigned int events = 0;

	stream->ops->poll_wait(stream, file, wait);

	/* Note: we don't explicitly check whether there's something to read
	 * here since this path may be very hot depending on what else
	 * userspace is polling, or on the timeout in use. We rely solely on
	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
	 * samples to read.
	 */
	if (dev_priv->perf.oa.pollin)
		events |= POLLIN;

	return events;
}

/**
 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this ensures
 * poll_wait() gets called with a wait queue that will be woken for new stream
 * data.
 *
 * Note: Implementation deferred to i915_perf_poll_locked()
 *
 * Returns: any poll events that are ready without sleeping
 */
static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}

/**
 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
 * @stream: A disabled i915 perf stream
 *
 * [Re]enables the associated capture of data for this stream.
 *
 * If a stream was previously enabled then there's currently no intention
 * to provide userspace any guarantee about the preservation of previously
 * buffered data.
 */
static void i915_perf_enable_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		return;

	/* Allow stream->ops->enable() to refer to this */
	stream->enabled = true;

	if (stream->ops->enable)
		stream->ops->enable(stream);
}

/**
 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
 * @stream: An enabled i915 perf stream
 *
 * Disables the associated capture of data for this stream.
 *
 * The intention is that disabling and re-enabling a stream will ideally be
 * cheaper than destroying and re-opening a stream with the same configuration,
 * though there are no formal guarantees about what state or buffered data
 * must be retained between disabling and re-enabling a stream.
 *
 * Note: while a stream is disabled it's considered an error for userspace
 * to attempt to read from the stream (-EIO).
 */
static void i915_perf_disable_locked(struct i915_perf_stream *stream)
{
	if (!stream->enabled)
		return;

	/* Allow stream->ops->disable() to refer to this */
	stream->enabled = false;

	if (stream->ops->disable)
		stream->ops->disable(stream);
}

/**
 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
 * @stream: An i915 perf stream
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
				   unsigned int cmd,
				   unsigned long arg)
{
	switch (cmd) {
	case I915_PERF_IOCTL_ENABLE:
		i915_perf_enable_locked(stream);
		return 0;
	case I915_PERF_IOCTL_DISABLE:
		i915_perf_disable_locked(stream);
		return 0;
	}

	return -EINVAL;
}

/**
 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
 * @file: An i915 perf stream file
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Implementation deferred to i915_perf_ioctl_locked().
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl(struct file *file,
			    unsigned int cmd,
			    unsigned long arg)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;
	long ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_ioctl_locked(stream, cmd, arg);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}

/**
 * i915_perf_destroy_locked - destroy an i915 perf stream
 * @stream: An i915 perf stream
 *
 * Frees all resources associated with the given i915 perf @stream, disabling
 * any associated data capture in the process.
 *
 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 */
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		i915_perf_disable_locked(stream);

	if (stream->ops->destroy)
		stream->ops->destroy(stream);

	list_del(&stream->link);

	if (stream->ctx)
		i915_gem_context_put(stream->ctx);

	kfree(stream);
}

/**
 * i915_perf_release - handles userspace close() of a stream file
 * @inode: anonymous inode associated with file
 * @file: An i915 perf stream file
 *
 * Cleans up any resources associated with an open i915 perf stream file.
 *
 * NB: close() can't really fail from the userspace point of view.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_perf_release(struct inode *inode, struct file *file)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;

	mutex_lock(&dev_priv->perf.lock);
	i915_perf_destroy_locked(stream);
	mutex_unlock(&dev_priv->perf.lock);

	return 0;
}

static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.release	= i915_perf_release,
	.poll		= i915_perf_poll,
	.read		= i915_perf_read,
	.unlocked_ioctl	= i915_perf_ioctl,
};

/**
 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
 * @dev_priv: i915 device instance
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: individually validated u64 property value pairs
 * @file: drm file
 *
 * See i915_perf_ioctl_open() for interface details.
 *
 * Implements further stream config validation and stream initialization on
 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
 * taken to serialize with any non-file-operation driver hooks.
 *
 * Note: at this point the @props have only been validated in isolation and
 * it's still necessary to validate that the combination of properties makes
 * sense.
 *
 * In the case where userspace is interested in OA unit metrics then further
 * config validation and stream initialization details will be handled by
 * i915_oa_stream_init(). The code here should only validate config state that
 * will be relevant to all stream types / backends.
 *
 * Returns: zero on success or a negative error code.
 */
static int
i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
			    struct drm_i915_perf_open_param *param,
			    struct perf_open_properties *props,
			    struct drm_file *file)
{
	struct i915_gem_context *specific_ctx = NULL;
	struct i915_perf_stream *stream = NULL;
	unsigned long f_flags = 0;
	bool privileged_op = true;
	int stream_fd;
	int ret;

	if (props->single_context) {
		u32 ctx_handle = props->ctx_handle;
		struct drm_i915_file_private *file_priv = file->driver_priv;

		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
		if (!specific_ctx) {
			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
				  ctx_handle);
			ret = -ENOENT;
			goto err;
		}
	}

	/*
	 * On Haswell the OA unit supports clock gating off for a specific
	 * context and in this mode there's no visibility of metrics for the
	 * rest of the system, which we consider acceptable for a
	 * non-privileged client.
	 *
	 * For Gen8+ the OA unit no longer supports clock gating off for a
	 * specific context and the kernel can't securely stop the counters
	 * from updating as system-wide / global values. Even though we can
	 * filter reports based on the included context ID we can't block
	 * clients from seeing the raw / global counter values via
	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
	 * enable the OA unit by default.
	 */
	if (IS_HASWELL(dev_priv) && specific_ctx)
		privileged_op = false;

	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
	 * we check a dev.i915.perf_stream_paranoid sysctl option
	 * to determine if it's ok to access system wide OA counters
	 * without CAP_SYS_ADMIN privileges.
	 */
	if (privileged_op &&
	    i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
		ret = -EACCES;
		goto err_ctx;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto err_ctx;
	}

	stream->dev_priv = dev_priv;
	stream->ctx = specific_ctx;

	ret = i915_oa_stream_init(stream, param, props);
	if (ret)
		goto err_alloc;

	/* we avoid simply assigning stream->sample_flags = props->sample_flags
	 * to have _stream_init check the combination of sample flags more
	 * thoroughly, but still this is the expected result at this point.
	 */
	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
		ret = -ENODEV;
		goto err_flags;
	}

	list_add(&stream->link, &dev_priv->perf.streams);

	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
		f_flags |= O_NONBLOCK;

	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_open;
	}

	if (!(param->flags & I915_PERF_FLAG_DISABLED))
		i915_perf_enable_locked(stream);

	return stream_fd;

err_open:
	list_del(&stream->link);
err_flags:
	if (stream->ops->destroy)
		stream->ops->destroy(stream);
err_alloc:
	kfree(stream);
err_ctx:
	if (specific_ctx)
		i915_gem_context_put(specific_ctx);
err:
	return ret;
}

  2322. static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
  2323. {
  2324. return div_u64(1000000000ULL * (2ULL << exponent),
  2325. dev_priv->perf.oa.timestamp_frequency);
  2326. }
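
/*
 * Worked example of the exponent math above: the OA unit emits a report
 * every (2 << exponent) timestamp ticks, so with the Haswell timestamp
 * frequency of 12.5 MHz (80ns per tick) an exponent of 0 gives
 * 2 * 80ns = 160ns between reports, while an exponent of 5 gives
 * 64 ticks, i.e. 5.12us. (Illustrative arithmetic only; the exact tick
 * frequency is per-platform, as set up in i915_perf_init().)
 */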
/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @dev_priv: i915 device instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties so
 * we shouldn't validate or assume anything about ordering here. This doesn't
 * rule out defining new properties with ordering requirements in the future.
 *
 * Returns: zero on success or a negative error code.
 */
static int read_properties_unlocked(struct drm_i915_private *dev_priv,
				    u64 __user *uprops,
				    u32 n_props,
				    struct perf_open_properties *props)
{
	u64 __user *uprop = uprops;
	u32 i;

	memset(props, 0, sizeof(struct perf_open_properties));

	if (!n_props) {
		DRM_DEBUG("No i915 perf properties given\n");
		return -EINVAL;
	}

	/* Considering that ID = 0 is reserved and assuming that we don't
	 * (currently) expect any configurations to ever specify duplicate
	 * values for a particular property ID then the last _PROP_MAX value is
	 * one greater than the maximum number of properties we expect to get
	 * from userspace.
	 */
	if (n_props >= DRM_I915_PERF_PROP_MAX) {
		DRM_DEBUG("More i915 perf properties specified than exist\n");
		return -EINVAL;
	}

	for (i = 0; i < n_props; i++) {
		u64 oa_period, oa_freq_hz;
		u64 id, value;
		int ret;

		ret = get_user(id, uprop);
		if (ret)
			return ret;

		ret = get_user(value, uprop + 1);
		if (ret)
			return ret;

		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
			DRM_DEBUG("Unknown i915 perf property ID\n");
			return -EINVAL;
		}

		switch ((enum drm_i915_perf_property_id)id) {
		case DRM_I915_PERF_PROP_CTX_HANDLE:
			props->single_context = 1;
			props->ctx_handle = value;
			break;
		case DRM_I915_PERF_PROP_SAMPLE_OA:
			props->sample_flags |= SAMPLE_OA_REPORT;
			break;
		case DRM_I915_PERF_PROP_OA_METRICS_SET:
			if (value == 0) {
				DRM_DEBUG("Unknown OA metric set ID\n");
				return -EINVAL;
			}
			props->metrics_set = value;
			break;
		case DRM_I915_PERF_PROP_OA_FORMAT:
			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
				DRM_DEBUG("Out-of-range OA report format %llu\n",
					  value);
				return -EINVAL;
			}
			if (!dev_priv->perf.oa.oa_formats[value].size) {
				DRM_DEBUG("Unsupported OA report format %llu\n",
					  value);
				return -EINVAL;
			}
			props->oa_format = value;
			break;
		case DRM_I915_PERF_PROP_OA_EXPONENT:
			if (value > OA_EXPONENT_MAX) {
				DRM_DEBUG("OA timer exponent too high (> %u)\n",
					  OA_EXPONENT_MAX);
				return -EINVAL;
			}

			/* Theoretically we can program the OA unit to sample
			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
			 * for BXT. We don't allow such high sampling
			 * frequencies by default unless root.
			 */

			BUILD_BUG_ON(sizeof(oa_period) != 8);
			oa_period = oa_exponent_to_ns(dev_priv, value);

			/* This check is primarily to ensure that oa_period <=
			 * UINT32_MAX (before passing to do_div which only
			 * accepts a u32 denominator), but we can also skip
			 * checking anything < 1Hz which implicitly can't be
			 * limited via an integer oa_max_sample_rate.
			 */
			if (oa_period <= NSEC_PER_SEC) {
				u64 tmp = NSEC_PER_SEC;
				do_div(tmp, oa_period);
				oa_freq_hz = tmp;
			} else
				oa_freq_hz = 0;

			if (oa_freq_hz > i915_oa_max_sample_rate &&
			    !capable(CAP_SYS_ADMIN)) {
				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
					  i915_oa_max_sample_rate);
				return -EACCES;
			}

			props->oa_periodic = true;
			props->oa_period_exponent = value;
			break;
		case DRM_I915_PERF_PROP_MAX:
			MISSING_CASE(id);
			return -EINVAL;
		}

		uprop += 2;
	}

	return 0;
}
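
/*
 * For reference, the property array this function parses is laid out by
 * userspace as flat (id, value) u64 pairs. A minimal sketch (illustrative
 * only; the metrics set id and exponent are placeholder values):
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *
 * with num_properties = 4, i.e. the number of (id, value) pairs, not the
 * number of u64s.
 */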
/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 key, value pair properties.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_sem.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Return: A newly opened i915 Perf stream file descriptor or negative
 * error code on failure.
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_perf_open_param *param = data;
	struct perf_open_properties props;
	u32 known_open_flags;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
			   I915_PERF_FLAG_FD_NONBLOCK |
			   I915_PERF_FLAG_DISABLED;
	if (param->flags & ~known_open_flags) {
		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
		return -EINVAL;
	}

	ret = read_properties_unlocked(dev_priv,
				       u64_to_user_ptr(param->properties_ptr),
				       param->num_properties,
				       &props);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}
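
/*
 * A hedged sketch of the matching userspace call (struct and ioctl names
 * as defined in uapi/drm/i915_drm.h; error handling elided; reusing the
 * properties array sketched after read_properties_unlocked() above):
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC |
 *			 I915_PERF_FLAG_FD_NONBLOCK,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd is then read()/poll()ed for OA reports until close().
 */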
/**
 * i915_perf_register - exposes i915-perf to userspace
 * @dev_priv: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!dev_priv->perf.initialized)
		return;

	/* Take the lock to be sure we're synchronized with any attempted
	 * i915_perf_open_ioctl(), considering that we register after the
	 * driver is already exposed to userspace.
	 */
	mutex_lock(&dev_priv->perf.lock);

	dev_priv->perf.metrics_kobj =
		kobject_create_and_add("metrics",
				       &dev_priv->drm.primary->kdev->kobj);
	if (!dev_priv->perf.metrics_kobj)
		goto exit;

	sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);

	if (IS_HASWELL(dev_priv)) {
		i915_perf_load_test_config_hsw(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		i915_perf_load_test_config_bdw(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		i915_perf_load_test_config_chv(dev_priv);
	} else if (IS_SKYLAKE(dev_priv)) {
		if (IS_SKL_GT2(dev_priv))
			i915_perf_load_test_config_sklgt2(dev_priv);
		else if (IS_SKL_GT3(dev_priv))
			i915_perf_load_test_config_sklgt3(dev_priv);
		else if (IS_SKL_GT4(dev_priv))
			i915_perf_load_test_config_sklgt4(dev_priv);
	} else if (IS_BROXTON(dev_priv)) {
		i915_perf_load_test_config_bxt(dev_priv);
	} else if (IS_KABYLAKE(dev_priv)) {
		if (IS_KBL_GT2(dev_priv))
			i915_perf_load_test_config_kblgt2(dev_priv);
		else if (IS_KBL_GT3(dev_priv))
			i915_perf_load_test_config_kblgt3(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv)) {
		i915_perf_load_test_config_glk(dev_priv);
	}

	if (dev_priv->perf.oa.test_config.id == 0)
		goto sysfs_error;

	ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
				 &dev_priv->perf.oa.test_config.sysfs_metric);
	if (ret)
		goto sysfs_error;

	atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);

	goto exit;

sysfs_error:
	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;

exit:
	mutex_unlock(&dev_priv->perf.lock);
}

/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @dev_priv: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.metrics_kobj)
		return;

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &dev_priv->perf.oa.test_config.sysfs_metric);

	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (flex_eu_regs[i].reg == addr)
			return true;
	}
	return false;
}

static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) ||
	       (addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) ||
	       (addr >= OACEC0_0.reg && addr <= OACEC7_1.reg);
}

static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return addr == HALF_SLICE_CHICKEN2.reg ||
	       (addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) ||
	       (addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) ||
	       (addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg);
}

static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       addr == WAIT_FOR_RC6_EXIT.reg ||
	       (addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg);
}

static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= 0x25100 && addr <= 0x2FF90) ||
	       addr == 0x9ec0;
}

static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= 0x182300 && addr <= 0x1823A4);
}

static uint32_t mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (HALF_SLICE_CHICKEN2.reg == reg)
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (WAIT_FOR_RC6_EXIT.reg == reg)
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}
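
/*
 * Both registers above are "masked" registers: a sketch of the convention
 * (assuming the usual i915 _MASKED_BIT_ENABLE() encoding, where the value
 * is duplicated into the top 16 bits as a write-enable mask) is that
 * clearing _MASKED_BIT_ENABLE(bit) from a userspace value drops both the
 * bit and its write-enable, so a write leaves that hardware field untouched.
 */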
static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
					 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
		return ERR_PTR(-EFAULT);

	/* No is_valid function means we're not allowing any register to be programmed. */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(dev_priv, addr)) {
			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}

static ssize_t show_dynamic_id(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(dev_priv->perf.metrics_kobj,
				  &oa_config->sysfs_metric);
}
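
/*
 * The group created above lands under the drm device's sysfs node, e.g.
 * (path shown for illustration, assuming card0):
 *
 *	/sys/class/drm/card0/metrics/<uuid>/id
 *
 * where reading "id" returns the idr-allocated config number that userspace
 * then passes back as DRM_I915_PERF_PROP_OA_METRICS_SET when opening a
 * stream.
 */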
/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open ioctl
 * or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	int err, id;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!dev_priv->perf.metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		DRM_DEBUG("No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		DRM_DEBUG("Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	atomic_set(&oa_config->ref_count, 1);

	if (!uuid_is_valid(args->uuid)) {
		DRM_DEBUG("Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config was
	 * kzalloc'd.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	oa_config->mux_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_mux_reg,
			      u64_to_user_ptr(args->mux_regs_ptr),
			      args->n_mux_regs);
	if (IS_ERR(oa_config->mux_regs)) {
		DRM_DEBUG("Failed to create OA config for mux_regs\n");
		err = PTR_ERR(oa_config->mux_regs);
		goto reg_err;
	}

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	oa_config->b_counter_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_b_counter_reg,
			      u64_to_user_ptr(args->boolean_regs_ptr),
			      args->n_boolean_regs);
	if (IS_ERR(oa_config->b_counter_regs)) {
		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(oa_config->b_counter_regs);
		goto reg_err;
	}

	if (INTEL_GEN(dev_priv) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		oa_config->flex_regs =
			alloc_oa_regs(dev_priv,
				      dev_priv->perf.oa.ops.is_valid_flex_reg,
				      u64_to_user_ptr(args->flex_regs_ptr),
				      args->n_flex_regs);
		if (IS_ERR(oa_config->flex_regs)) {
			DRM_DEBUG("Failed to create OA config for flex_regs\n");
			err = PTR_ERR(oa_config->flex_regs);
			goto reg_err;
		}
	}

	err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
	 */
	idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			DRM_DEBUG("OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
	if (err) {
		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 is reserved for the kernel's built-in
	 * test config.
	 */
	oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		DRM_DEBUG("Failed to allocate id for OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return oa_config->id;

sysfs_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
reg_err:
	put_oa_config(dev_priv, oa_config);
	DRM_DEBUG("Failed to add new OA config\n");
	return err;
}
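
/*
 * A hedged userspace sketch for this ioctl (struct and ioctl names from
 * uapi/drm/i915_drm.h; the register arrays are flat (addr, value) u32 pairs
 * and the uuid/counts here are placeholders):
 *
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = n_mux,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *		.n_boolean_regs = n_bool,
 *		.boolean_regs_ptr = (uintptr_t)bool_regs,
 *	};
 *	int id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */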
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		goto lock_err;

	oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto config_err;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &oa_config->sysfs_metric);

	idr_remove(&dev_priv->perf.metrics_idr, *arg);
	put_oa_config(dev_priv, oa_config);

config_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
	return ret;
}
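
/*
 * Removal from userspace is symmetric (again a sketch; the argument is the
 * u64 config id returned by the add ioctl):
 *
 *	uint64_t config_id = id;
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id);
 */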
static struct ctl_table oa_table[] = {
	{
		.procname = "perf_stream_paranoid",
		.data = &i915_perf_stream_paranoid,
		.maxlen = sizeof(i915_perf_stream_paranoid),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{
		.procname = "oa_max_sample_rate",
		.data = &i915_oa_max_sample_rate,
		.maxlen = sizeof(i915_oa_max_sample_rate),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &oa_sample_rate_hard_limit,
	},
	{}
};

static struct ctl_table i915_root[] = {
	{
		.procname = "i915",
		.maxlen = 0,
		.mode = 0555,
		.child = oa_table,
	},
	{}
};

static struct ctl_table dev_root[] = {
	{
		.procname = "dev",
		.maxlen = 0,
		.mode = 0555,
		.child = i915_root,
	},
	{}
};
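
/*
 * The nested ctl_table chain above (dev -> i915 -> oa_table) publishes two
 * integer knobs:
 *
 *	/proc/sys/dev/i915/perf_stream_paranoid	(clamped to 0 or 1)
 *	/proc/sys/dev/i915/oa_max_sample_rate	(Hz, capped at the hard limit)
 *
 * matching the sysctl names checked in i915_perf_open_ioctl_locked() and
 * read_properties_unlocked().
 */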
/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with the i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
	dev_priv->perf.oa.timestamp_frequency = 0;

	if (IS_HASWELL(dev_priv)) {
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			hsw_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
		dev_priv->perf.oa.ops.read = gen7_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read =
			gen7_oa_hw_tail_read;

		dev_priv->perf.oa.timestamp_frequency = 12500000;

		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
	} else if (i915.enable_execlists) {
		/* Note: although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming, did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			gen8_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg =
			gen8_is_valid_flex_addr;
		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
		dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
		dev_priv->perf.oa.ops.read = gen8_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

		if (IS_GEN8(dev_priv)) {
			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

			dev_priv->perf.oa.timestamp_frequency = 12500000;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);

			if (IS_CHERRYVIEW(dev_priv)) {
				dev_priv->perf.oa.ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}
		} else if (IS_GEN9(dev_priv)) {
			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);

			switch (dev_priv->info.platform) {
			case INTEL_BROXTON:
			case INTEL_GEMINILAKE:
				dev_priv->perf.oa.timestamp_frequency = 19200000;
				break;
			case INTEL_SKYLAKE:
			case INTEL_KABYLAKE:
				dev_priv->perf.oa.timestamp_frequency = 12000000;
				break;
			default:
				/* Leave timestamp_frequency at 0 so we can
				 * detect unsupported platforms.
				 */
				break;
			}
		}
	}

	if (dev_priv->perf.oa.timestamp_frequency) {
		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

		INIT_LIST_HEAD(&dev_priv->perf.streams);
		mutex_init(&dev_priv->perf.lock);
		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

		oa_sample_rate_hard_limit =
			dev_priv->perf.oa.timestamp_frequency / 2;
		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

		mutex_init(&dev_priv->perf.metrics_lock);
		idr_init(&dev_priv->perf.metrics_idr);

		dev_priv->perf.initialized = true;
	}
}

static int destroy_config(int id, void *p, void *data)
{
	struct drm_i915_private *dev_priv = data;
	struct i915_oa_config *oa_config = p;

	put_oa_config(dev_priv, oa_config);

	return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.initialized)
		return;

	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
	idr_destroy(&dev_priv->perf.metrics_idr);

	unregister_sysctl_table(dev_priv->perf.sysctl_header);

	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

	dev_priv->perf.initialized = false;
}