i915_perf.c
/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */
/**
 * DOC: i915 Perf, streaming API for GPU metrics
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Some notes regarding Linux Perf:
 * --------------------------------
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped, OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature; there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our
 *   locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */
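/* For illustration, a minimal userspace sketch of the flow described above
 * (not part of this file; the DRM_I915_PERF_PROP_* and related names are
 * those defined in the i915 uapi header, metrics_set_id stands for a
 * configuration ID advertised under the sysfs metrics/ directory, and
 * error handling is omitted):
 *
 *	u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, true,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = ARRAY_SIZE(properties) / 2,
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * read()s on stream_fd then return a sequence of records, each a
 * struct drm_i915_perf_record_header followed by its type specific
 * payload (a raw OA report for DRM_I915_PERF_RECORD_SAMPLE records).
 */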
#include <linux/anon_inodes.h>
#include <linux/sizes.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
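/* For example, given the 16M buffer: if the OA unit's write pointer has
 * wrapped, with tail = 0x000040 and head = 0xffffc0, then
 * OA_TAKEN(0x000040, 0xffffc0) = (0x000040 - 0xffffc0) & 0xffffff = 0x80,
 * i.e. 128 bytes of unread reports straddling the end of the buffer.
 */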
/* There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far.
 *
 * Although this can be observed explicitly by checking for a zeroed report-id
 * field in tail reports, it seems preferable to account for this earlier e.g.
 * as part of the _oa_buffer_is_empty checks to minimize -EAGAIN polling cycles
 * in this situation.
 *
 * To give time for the most recent reports to land before they may be copied to
 * userspace, the driver operates as if the tail pointer effectively lags behind
 * the HW tail pointer by 'tail_margin' bytes. The margin in bytes is calculated
 * based on this constant in nanoseconds, the current OA sampling exponent
 * and current report size.
 *
 * There is also a fallback check while reading to simply skip over reports with
 * a zeroed report-id.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
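/* A worked example of the margin calculation done in i915_oa_stream_init():
 * a sampling exponent of 6 gives a 10240ns report period on Haswell (see
 * the example by oa_exponent_to_ns() below), so up to 100000 / 10240 = 9
 * whole reports may not have landed yet; the driver therefore reserves a
 * margin of (9 + 1) * format_size bytes (2560 bytes for the 256 byte
 * A45_B8_C8 format).
 */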
/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
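/* To make the cap concrete: exponent 31 triggers a report every 2^32
 * timestamp ticks (one full cycle of timestamp bit 31), which at 12.5MHz
 * is roughly every 343 seconds - the same interval after which the 32bit
 * report timestamps themselves wrap, so a larger exponent would make
 * timestamp overflow between consecutive reports unaccountable.
 */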
#define INVALID_CTX_ID 0xffffffff

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * 160ns is the smallest sampling period we can theoretically program the OA
 * unit with on Haswell, corresponding to 6.25MHz.
 */
static int oa_sample_rate_hard_limit = 6250000;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A13]        = { 0, 64 },
        [I915_OA_FORMAT_A29]        = { 1, 128 },
        [I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
        /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
        [I915_OA_FORMAT_B4_C8]      = { 4, 64 },
        [I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
        [I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
        [I915_OA_FORMAT_C4_B8]      = { 7, 64 },
};

#define SAMPLE_OA_REPORT	(1<<0)

struct perf_open_properties {
        u32 sample_flags;

        u64 single_context:1;
        u64 ctx_handle;

        /* OA sampling state */
        int metrics_set;
        int oa_format;
        bool oa_periodic;
        int oa_period_exponent;
};

/* NB: This is either called via fops or the poll check hrtimer (atomic ctx)
 *
 * It's safe to read OA config state here unlocked, assuming that this is only
 * called while the stream is enabled, while the global OA configuration can't
 * be modified.
 *
 * Note: we don't lock around the head/tail reads even though there's the slim
 * possibility of read() fop errors forcing a re-init of the OA buffer
 * pointers. A race here could result in a false positive !empty status which
 * is acceptable.
 */
static bool gen7_oa_buffer_is_empty_fop_unlocked(struct drm_i915_private *dev_priv)
{
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        u32 oastatus2 = I915_READ(GEN7_OASTATUS2);
        u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
        u32 head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
        u32 tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;

        return OA_TAKEN(tail, head) <
                dev_priv->perf.oa.tail_margin + report_size;
}
/**
 * Appends a status record to a userspace read() buffer.
 */
static int append_oa_status(struct i915_perf_stream *stream,
                            char __user *buf,
                            size_t count,
                            size_t *offset,
                            enum drm_i915_perf_record_type type)
{
        struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

        if ((count - *offset) < header.size)
                return -ENOSPC;

        if (copy_to_user(buf + *offset, &header, sizeof(header)))
                return -EFAULT;

        (*offset) += header.size;

        return 0;
}

/**
 * Copies single OA report into userspace read() buffer.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
                            char __user *buf,
                            size_t count,
                            size_t *offset,
                            const u8 *report)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        struct drm_i915_perf_record_header header;
        u32 sample_flags = stream->sample_flags;

        header.type = DRM_I915_PERF_RECORD_SAMPLE;
        header.pad = 0;
        header.size = stream->sample_size;

        if ((count - *offset) < header.size)
                return -ENOSPC;

        buf += *offset;
        if (copy_to_user(buf, &header, sizeof(header)))
                return -EFAULT;
        buf += sizeof(header);

        if (sample_flags & SAMPLE_OA_REPORT) {
                if (copy_to_user(buf, report, report_size))
                        return -EFAULT;
        }

        (*offset) += header.size;

        return 0;
}
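/* For reference, the layout the append_* helpers above produce in the
 * userspace buffer is (per the uapi definition of the record header):
 *
 *	struct drm_i915_perf_record_header {
 *		__u32 type;	// e.g. DRM_I915_PERF_RECORD_SAMPLE
 *		__u16 pad;
 *		__u16 size;	// total record size including this header
 *	};
 *
 * followed by the type specific payload; for SAMPLE records with
 * SAMPLE_OA_REPORT set that's the raw, HW specific OA report, while
 * status records carry no payload at all.
 */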
/**
 * Copies all buffered OA reports into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @head_ptr: (inout): the current oa buffer cpu read position
 * @tail: the current oa buffer gpu write position
 *
 * Returns 0 on success, negative error code on failure.
 *
 * Notably any error condition resulting in a short read (-ENOSPC or
 * -EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the head chases the tail?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
                                  char __user *buf,
                                  size_t count,
                                  size_t *offset,
                                  u32 *head_ptr,
                                  u32 tail)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
        int tail_margin = dev_priv->perf.oa.tail_margin;
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        u32 mask = (OA_BUFFER_SIZE - 1);
        u32 head;
        u32 taken;
        int ret = 0;

        if (WARN_ON(!stream->enabled))
                return -EIO;

        head = *head_ptr - gtt_offset;
        tail -= gtt_offset;

        /* The OA unit is expected to wrap the tail pointer according to the OA
         * buffer size and since we should never write a misaligned head
         * pointer we don't expect to read one back either...
         */
        if (tail > OA_BUFFER_SIZE || head > OA_BUFFER_SIZE ||
            head % report_size) {
                DRM_ERROR("Inconsistent OA buffer pointer (head = %u, tail = %u): force restart\n",
                          head, tail);
                dev_priv->perf.oa.ops.oa_disable(dev_priv);
                dev_priv->perf.oa.ops.oa_enable(dev_priv);
                *head_ptr = I915_READ(GEN7_OASTATUS2) &
                        GEN7_OASTATUS2_HEAD_MASK;
                return -EIO;
        }

        /* The tail pointer increases in 64 byte increments, not in report_size
         * steps...
         */
        tail &= ~(report_size - 1);

        /* Move the tail pointer back by the current tail_margin to account for
         * the possibility that the latest reports may not have really landed
         * in memory yet...
         */
        if (OA_TAKEN(tail, head) < report_size + tail_margin)
                return -EAGAIN;
        tail -= tail_margin;
        tail &= mask;

        for (/* none */;
             (taken = OA_TAKEN(tail, head));
             head = (head + report_size) & mask) {
                u8 *report = oa_buf_base + head;
                u32 *report32 = (void *)report;

                /* All the report sizes factor neatly into the buffer
                 * size so we never expect to see a report split
                 * between the beginning and end of the buffer.
                 *
                 * Given the initial alignment check a misalignment
                 * here would imply a driver bug that would result
                 * in an overrun.
                 */
                if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
                        DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
                        break;
                }

                /* The report-ID field for periodic samples includes
                 * some undocumented flags related to what triggered
                 * the report and is never expected to be zero so we
                 * can check that the report isn't invalid before
                 * copying it to userspace...
                 */
                if (report32[0] == 0) {
                        DRM_ERROR("Skipping spurious, invalid OA report\n");
                        continue;
                }

                ret = append_oa_sample(stream, buf, count, offset, report);
                if (ret)
                        break;

                /* The above report-id field sanity check is based on
                 * the assumption that the OA buffer is initially
                 * zeroed and we reset the field after copying so the
                 * check is still meaningful once old reports start
                 * being overwritten.
                 */
                report32[0] = 0;
        }

        *head_ptr = gtt_offset + head;

        return ret;
}
static int gen7_oa_read(struct i915_perf_stream *stream,
                        char __user *buf,
                        size_t count,
                        size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        u32 oastatus2;
        u32 oastatus1;
        u32 head;
        u32 tail;
        int ret;

        if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
                return -EIO;

        oastatus2 = I915_READ(GEN7_OASTATUS2);
        oastatus1 = I915_READ(GEN7_OASTATUS1);

        head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
        tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;

        /* XXX: On Haswell we don't have a safe way to clear oastatus1
         * bits while the OA unit is enabled (while the tail pointer
         * may be updated asynchronously) so we ignore status bits
         * that have already been reported to userspace.
         */
        oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

        /* We treat OABUFFER_OVERFLOW as a significant error:
         *
         * - The status can be interpreted to mean that the buffer is
         *   currently full (with a higher precedence than OA_TAKEN()
         *   which will start to report a near-empty buffer after an
         *   overflow) but it's awkward that we can't clear the status
         *   on Haswell, so without a reset we won't be able to catch
         *   the state again.
         *
         * - Since it also implies the HW has started overwriting old
         *   reports it may also affect our sanity checks for invalid
         *   reports when copying to userspace that assume new reports
         *   are being written to cleared memory.
         *
         * - In the future we may want to introduce a flight recorder
         *   mode where the driver will automatically maintain a safe
         *   guard band between head/tail, avoiding this overflow
         *   condition, but we avoid the added driver complexity for
         *   now.
         */
        if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
                if (ret)
                        return ret;

                DRM_ERROR("OA buffer overflow: force restart\n");

                dev_priv->perf.oa.ops.oa_disable(dev_priv);
                dev_priv->perf.oa.ops.oa_enable(dev_priv);

                oastatus2 = I915_READ(GEN7_OASTATUS2);
                oastatus1 = I915_READ(GEN7_OASTATUS1);

                head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
                tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
        }

        if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
                if (ret)
                        return ret;
                dev_priv->perf.oa.gen7_latched_oastatus1 |=
                        GEN7_OASTATUS1_REPORT_LOST;
        }

        ret = gen7_append_oa_reports(stream, buf, count, offset,
                                     &head, tail);

        /* All the report sizes are a power of two and the
         * head should always be incremented by some multiple
         * of the report size.
         *
         * A warning here, but notably if we later read back a
         * misaligned pointer we will treat that as a bug since
         * it could lead to a buffer overrun.
         */
        WARN_ONCE(head & (report_size - 1),
                  "i915: Writing misaligned OA head pointer");

        /* Note: we update the head pointer here even if an error
         * was returned since the error may represent a short read
         * where some reports were successfully copied.
         */
        I915_WRITE(GEN7_OASTATUS2,
                   ((head & GEN7_OASTATUS2_HEAD_MASK) |
                    OA_MEM_SELECT_GGTT));

        return ret;
}
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        /* We would wait indefinitely if periodic sampling is not enabled */
        if (!dev_priv->perf.oa.periodic)
                return -EIO;

        /* Note: the oa_buffer_is_empty() condition is ok to run unlocked as it
         * just performs mmio reads of the OA buffer head + tail pointers and
         * it's assumed we're handling some operation that implies the stream
         * can't be destroyed until completion (such as a read()) that ensures
         * the device + OA buffer can't disappear
         */
        return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
                                        !dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv));
}

static void i915_oa_poll_wait(struct i915_perf_stream *stream,
                              struct file *file,
                              poll_table *wait)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

static int i915_oa_read(struct i915_perf_stream *stream,
                        char __user *buf,
                        size_t count,
                        size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

/* Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        struct i915_vma *vma;
        int ret;

        ret = i915_mutex_lock_interruptible(&dev_priv->drm);
        if (ret)
                return ret;

        /* As the ID is the gtt offset of the context's vma we pin
         * the vma to ensure the ID remains fixed.
         *
         * NB: implied RCS engine...
         */
        vma = i915_gem_context_pin_legacy(stream->ctx, 0);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto unlock;
        }

        dev_priv->perf.oa.pinned_rcs_vma = vma;

        /* Explicitly track the ID (instead of calling i915_ggtt_offset()
         * on the fly) considering the difference with gen8+ and
         * execlists
         */
        dev_priv->perf.oa.specific_ctx_id = i915_ggtt_offset(vma);

unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);

        return ret;
}

static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        mutex_lock(&dev_priv->drm.struct_mutex);

        i915_vma_unpin(dev_priv->perf.oa.pinned_rcs_vma);
        dev_priv->perf.oa.pinned_rcs_vma = NULL;

        dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void
free_oa_buffer(struct drm_i915_private *i915)
{
        mutex_lock(&i915->drm.struct_mutex);

        i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
        i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
        i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

        i915->perf.oa.oa_buffer.vma = NULL;
        i915->perf.oa.oa_buffer.vaddr = NULL;

        mutex_unlock(&i915->drm.struct_mutex);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

        dev_priv->perf.oa.ops.disable_metric_set(dev_priv);

        free_oa_buffer(dev_priv);

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        intel_runtime_pm_put(dev_priv);

        if (stream->ctx)
                oa_put_render_ctx_id(stream);

        dev_priv->perf.oa.exclusive_stream = NULL;
}

static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);

        /* Pre-DevBDW: OABUFFER must be set with counters off,
         * before OASTATUS1, but after OASTATUS2
         */
        I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
        I915_WRITE(GEN7_OABUFFER, gtt_offset);
        I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

        /* On Haswell we have to track which OASTATUS1 flags we've
         * already seen since they can't be cleared while periodic
         * sampling is enabled.
         */
        dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

        /* NB: although the OA buffer will initially be allocated
         * zeroed via shmfs (and so this memset is redundant when
         * first allocating), we may re-init the OA buffer, either
         * when re-enabling a stream or in error/reset paths.
         *
         * The reason we clear the buffer for each re-init is for the
         * sanity check in gen7_append_oa_reports() that looks at the
         * report-id field to make sure it's non-zero which relies on
         * the assumption that new reports are being written to zeroed
         * memory...
         */
        memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

        /* Maybe make ->pollin per-stream state if we support multiple
         * concurrent streams in the future.
         */
        dev_priv->perf.oa.pollin = false;
}
static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *bo;
        struct i915_vma *vma;
        int ret;

        if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
                return -ENODEV;

        ret = i915_mutex_lock_interruptible(&dev_priv->drm);
        if (ret)
                return ret;

        BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
        BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

        bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
        if (IS_ERR(bo)) {
                DRM_ERROR("Failed to allocate OA buffer\n");
                ret = PTR_ERR(bo);
                goto unlock;
        }

        ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
        if (ret)
                goto err_unref;

        /* PreHSW required 512K alignment, HSW requires 16M */
        vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
        }
        dev_priv->perf.oa.oa_buffer.vma = vma;

        dev_priv->perf.oa.oa_buffer.vaddr =
                i915_gem_object_pin_map(bo, I915_MAP_WB);
        if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
                ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
                goto err_unpin;
        }

        dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);

        DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
                         i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
                         dev_priv->perf.oa.oa_buffer.vaddr);

        goto unlock;

err_unpin:
        __i915_vma_unpin(vma);

err_unref:
        i915_gem_object_put(bo);

        dev_priv->perf.oa.oa_buffer.vaddr = NULL;
        dev_priv->perf.oa.oa_buffer.vma = NULL;

unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
}
static void config_oa_regs(struct drm_i915_private *dev_priv,
                           const struct i915_oa_reg *regs,
                           int n_regs)
{
        int i;

        for (i = 0; i < n_regs; i++) {
                const struct i915_oa_reg *reg = regs + i;

                I915_WRITE(reg->addr, reg->value);
        }
}

static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
{
        int ret = i915_oa_select_metric_set_hsw(dev_priv);

        if (ret)
                return ret;

        I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) |
                                      GT_NOA_ENABLE));

        /* PRM:
         *
         * OA unit is using “crclk” for its functionality. When trunk
         * level clock gating takes place, OA clock would be gated,
         * unable to count the events from non-render clock domain.
         * Render clock gating must be disabled when OA is enabled to
         * count the events from non-render domain. Unit level clock
         * gating for RCS should also be disabled.
         */
        I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                    ~GEN7_DOP_CLOCK_GATE_ENABLE));
        I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
                                  GEN6_CSUNIT_CLOCK_GATE_DISABLE));

        config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs,
                       dev_priv->perf.oa.mux_regs_len);

        /* It apparently takes a fairly long time for a new MUX
         * configuration to be applied after these register writes.
         * This delay duration was derived empirically based on the
         * render_basic config but hopefully it covers the maximum
         * configuration latency.
         *
         * As a fallback, the checks in _append_oa_reports() to skip
         * invalid OA reports do also seem to work to discard reports
         * generated before this config has completed - albeit not
         * silently.
         *
         * Unfortunately this is essentially a magic number, since we
         * don't currently know of a reliable mechanism for predicting
         * how long the MUX config will take to apply and besides
         * seeing invalid reports we don't know of a reliable way to
         * explicitly check that the MUX config has landed.
         *
         * It's even possible we've mischaracterized the underlying
         * problem - it just seems like the simplest explanation why
         * a delay at this location would mitigate any invalid reports.
         */
        usleep_range(15000, 20000);

        config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs,
                       dev_priv->perf.oa.b_counter_regs_len);

        return 0;
}
static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
{
        I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
                                  ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
        I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
                                    GEN7_DOP_CLOCK_GATE_ENABLE));

        I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
                                      ~GT_NOA_ENABLE));
}

static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
{
        assert_spin_locked(&dev_priv->perf.hook_lock);

        if (dev_priv->perf.oa.exclusive_stream->enabled) {
                struct i915_gem_context *ctx =
                        dev_priv->perf.oa.exclusive_stream->ctx;
                u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;

                bool periodic = dev_priv->perf.oa.periodic;
                u32 period_exponent = dev_priv->perf.oa.period_exponent;
                u32 report_format = dev_priv->perf.oa.oa_buffer.format;

                I915_WRITE(GEN7_OACONTROL,
                           (ctx_id & GEN7_OACONTROL_CTX_MASK) |
                           (period_exponent <<
                            GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
                           (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
                           (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
                           (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
                           GEN7_OACONTROL_ENABLE);
        } else
                I915_WRITE(GEN7_OACONTROL, 0);
}

static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
        unsigned long flags;

        /* Reset buf pointers so we don't forward reports from before now.
         *
         * Think carefully if considering trying to avoid this, since it
         * also ensures status flags and the buffer itself are cleared
         * in error paths, and we have checks for invalid reports based
         * on the assumption that certain fields are written to zeroed
         * memory which this helps maintain.
         */
        gen7_init_oa_buffer(dev_priv);

        spin_lock_irqsave(&dev_priv->perf.hook_lock, flags);
        gen7_update_oacontrol_locked(dev_priv);
        spin_unlock_irqrestore(&dev_priv->perf.hook_lock, flags);
}

static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        dev_priv->perf.oa.ops.oa_enable(dev_priv);

        if (dev_priv->perf.oa.periodic)
                hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
                              ns_to_ktime(POLL_PERIOD),
                              HRTIMER_MODE_REL_PINNED);
}

static void gen7_oa_disable(struct drm_i915_private *dev_priv)
{
        I915_WRITE(GEN7_OACONTROL, 0);
}

static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        dev_priv->perf.oa.ops.oa_disable(dev_priv);

        if (dev_priv->perf.oa.periodic)
                hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
}

static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
        return div_u64(1000000000ULL * (2ULL << exponent),
                       dev_priv->perf.oa.timestamp_frequency);
}
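/* oa_exponent_to_ns() worked example: with the 12.5MHz Haswell timestamp
 * frequency implied by the 160ns minimum period noted earlier, exponent 0
 * gives 1000000000 * 2 / 12500000 = 160ns, and each increment of the
 * exponent doubles the period (e.g. exponent 5 -> 5120ns).
 */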
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
        .destroy = i915_oa_stream_destroy,
        .enable = i915_oa_stream_enable,
        .disable = i915_oa_stream_disable,
        .wait_unlocked = i915_oa_wait_unlocked,
        .poll_wait = i915_oa_poll_wait,
        .read = i915_oa_read,
};

static int i915_oa_stream_init(struct i915_perf_stream *stream,
                               struct drm_i915_perf_open_param *param,
                               struct perf_open_properties *props)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int format_size;
        int ret;

        /* If the sysfs metrics/ directory wasn't registered for some
         * reason then don't let userspace try their luck with config
         * IDs
         */
        if (!dev_priv->perf.metrics_kobj) {
                DRM_ERROR("OA metrics weren't advertised via sysfs\n");
                return -EINVAL;
        }

        if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
                DRM_ERROR("Only OA report sampling supported\n");
                return -EINVAL;
        }

        if (!dev_priv->perf.oa.ops.init_oa_buffer) {
                DRM_ERROR("OA unit not supported\n");
                return -ENODEV;
        }

        /* To avoid the complexity of having to accurately filter
         * counter reports and marshal to the appropriate client
         * we currently only allow exclusive access
         */
        if (dev_priv->perf.oa.exclusive_stream) {
                DRM_ERROR("OA unit already in use\n");
                return -EBUSY;
        }

        if (!props->metrics_set) {
                DRM_ERROR("OA metric set not specified\n");
                return -EINVAL;
        }

        if (!props->oa_format) {
                DRM_ERROR("OA report format not specified\n");
                return -EINVAL;
        }

        stream->sample_size = sizeof(struct drm_i915_perf_record_header);

        format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;

        stream->sample_flags |= SAMPLE_OA_REPORT;
        stream->sample_size += format_size;

        dev_priv->perf.oa.oa_buffer.format_size = format_size;
        if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
                return -EINVAL;

        dev_priv->perf.oa.oa_buffer.format =
                dev_priv->perf.oa.oa_formats[props->oa_format].format;

        dev_priv->perf.oa.metrics_set = props->metrics_set;

        dev_priv->perf.oa.periodic = props->oa_periodic;
        if (dev_priv->perf.oa.periodic) {
                u32 tail;

                dev_priv->perf.oa.period_exponent = props->oa_period_exponent;

                /* See comment for OA_TAIL_MARGIN_NSEC for details
                 * about this tail_margin...
                 */
                tail = div64_u64(OA_TAIL_MARGIN_NSEC,
                                 oa_exponent_to_ns(dev_priv,
                                                   props->oa_period_exponent));
                dev_priv->perf.oa.tail_margin = (tail + 1) * format_size;
        }

        if (stream->ctx) {
                ret = oa_get_render_ctx_id(stream);
                if (ret)
                        return ret;
        }

        ret = alloc_oa_buffer(dev_priv);
        if (ret)
                goto err_oa_buf_alloc;

        /* PRM - observability performance counters:
         *
         *   OACONTROL, performance counter enable, note:
         *
         *   "When this bit is set, in order to have coherent counts,
         *   RC6 power state and trunk clock gating must be disabled.
         *   This can be achieved by programming MMIO registers as
         *   0xA094=0 and 0xA090[31]=1"
         *
         * In our case we are expecting that taking pm + FORCEWAKE
         * references will effectively disable RC6.
         */
        intel_runtime_pm_get(dev_priv);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

        ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
        if (ret)
                goto err_enable;

        stream->ops = &i915_oa_stream_ops;

        dev_priv->perf.oa.exclusive_stream = stream;

        return 0;

err_enable:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        intel_runtime_pm_put(dev_priv);
        free_oa_buffer(dev_priv);

err_oa_buf_alloc:
        if (stream->ctx)
                oa_put_render_ctx_id(stream);

        return ret;
}
static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
                                     struct file *file,
                                     char __user *buf,
                                     size_t count,
                                     loff_t *ppos)
{
        /* Note we keep the offset (aka bytes read) separate from any
         * error status so that the final check for whether we return
         * the bytes read with a higher precedence than any error (see
         * comment below) doesn't need to be handled/duplicated in
         * stream->ops->read() implementations.
         */
        size_t offset = 0;
        int ret = stream->ops->read(stream, buf, count, &offset);

        /* If we've successfully copied any data then reporting that
         * takes precedence over any internal error status, so the
         * data isn't lost.
         *
         * For example ret will be -ENOSPC whenever there is more
         * buffered data than can be copied to userspace, but that's
         * only interesting if we weren't able to copy some data
         * because it implies the userspace buffer is too small to
         * receive a single record (and we never split records).
         *
         * Another case with ret == -EFAULT is more of a grey area
         * since it would seem like bad form for userspace to ask us
         * to overrun its buffer, but the user knows best:
         *
         *   http://yarchive.net/comp/linux/partial_reads_writes.html
         */
        return offset ?: (ret ?: -EAGAIN);
}
static ssize_t i915_perf_read(struct file *file,
                              char __user *buf,
                              size_t count,
                              loff_t *ppos)
{
        struct i915_perf_stream *stream = file->private_data;
        struct drm_i915_private *dev_priv = stream->dev_priv;
        ssize_t ret;

        /* To ensure it's handled consistently we simply treat all reads of a
         * disabled stream as an error. In particular it might otherwise lead
         * to a deadlock for blocking file descriptors...
         */
        if (!stream->enabled)
                return -EIO;

        if (!(file->f_flags & O_NONBLOCK)) {
                /* There's the small chance of false positives from
                 * stream->ops->wait_unlocked.
                 *
                 * E.g. with single context filtering, since we only wait
                 * until the oabuffer has >= 1 report, we don't immediately
                 * know whether any reports really belong to the current
                 * context
                 */
                do {
                        ret = stream->ops->wait_unlocked(stream);
                        if (ret)
                                return ret;

                        mutex_lock(&dev_priv->perf.lock);
                        ret = i915_perf_read_locked(stream, file,
                                                    buf, count, ppos);
                        mutex_unlock(&dev_priv->perf.lock);
                } while (ret == -EAGAIN);
        } else {
                mutex_lock(&dev_priv->perf.lock);
                ret = i915_perf_read_locked(stream, file, buf, count, ppos);
                mutex_unlock(&dev_priv->perf.lock);
        }

        if (ret >= 0) {
                /* Maybe make ->pollin per-stream state if we support multiple
                 * concurrent streams in the future.
                 */
                dev_priv->perf.oa.pollin = false;
        }

        return ret;
}
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
        struct drm_i915_private *dev_priv =
                container_of(hrtimer, typeof(*dev_priv),
                             perf.oa.poll_check_timer);

        if (!dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)) {
                dev_priv->perf.oa.pollin = true;
                wake_up(&dev_priv->perf.oa.poll_wq);
        }

        hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));

        return HRTIMER_RESTART;
}

static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
                                          struct i915_perf_stream *stream,
                                          struct file *file,
                                          poll_table *wait)
{
        unsigned int events = 0;

        stream->ops->poll_wait(stream, file, wait);

        /* Note: we don't explicitly check whether there's something to read
         * here since this path may be very hot depending on what else
         * userspace is polling, or on the timeout in use. We rely solely on
         * the hrtimer/oa_poll_check_timer_cb to notify us when there are
         * samples to read.
         */
        if (dev_priv->perf.oa.pollin)
                events |= POLLIN;

        return events;
}

static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
{
        struct i915_perf_stream *stream = file->private_data;
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int ret;

        mutex_lock(&dev_priv->perf.lock);
        ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
        mutex_unlock(&dev_priv->perf.lock);

        return ret;
}
static void i915_perf_enable_locked(struct i915_perf_stream *stream)
{
        if (stream->enabled)
                return;

        /* Allow stream->ops->enable() to refer to this */
        stream->enabled = true;

        if (stream->ops->enable)
                stream->ops->enable(stream);
}

static void i915_perf_disable_locked(struct i915_perf_stream *stream)
{
        if (!stream->enabled)
                return;

        /* Allow stream->ops->disable() to refer to this */
        stream->enabled = false;

        if (stream->ops->disable)
                stream->ops->disable(stream);
}

static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
                                   unsigned int cmd,
                                   unsigned long arg)
{
        switch (cmd) {
        case I915_PERF_IOCTL_ENABLE:
                i915_perf_enable_locked(stream);
                return 0;
        case I915_PERF_IOCTL_DISABLE:
                i915_perf_disable_locked(stream);
                return 0;
        }

        return -EINVAL;
}

static long i915_perf_ioctl(struct file *file,
                            unsigned int cmd,
                            unsigned long arg)
{
        struct i915_perf_stream *stream = file->private_data;
        struct drm_i915_private *dev_priv = stream->dev_priv;
        long ret;

        mutex_lock(&dev_priv->perf.lock);
        ret = i915_perf_ioctl_locked(stream, cmd, arg);
        mutex_unlock(&dev_priv->perf.lock);

        return ret;
}

static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        if (stream->enabled)
                i915_perf_disable_locked(stream);

        if (stream->ops->destroy)
                stream->ops->destroy(stream);

        list_del(&stream->link);

        if (stream->ctx) {
                mutex_lock(&dev_priv->drm.struct_mutex);
                i915_gem_context_put(stream->ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }

        kfree(stream);
}

static int i915_perf_release(struct inode *inode, struct file *file)
{
        struct i915_perf_stream *stream = file->private_data;
        struct drm_i915_private *dev_priv = stream->dev_priv;

        mutex_lock(&dev_priv->perf.lock);
        i915_perf_destroy_locked(stream);
        mutex_unlock(&dev_priv->perf.lock);

        return 0;
}

static const struct file_operations fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .release        = i915_perf_release,
        .poll           = i915_perf_poll,
        .read           = i915_perf_read,
        .unlocked_ioctl = i915_perf_ioctl,
};
static struct i915_gem_context *
lookup_context(struct drm_i915_private *dev_priv,
               struct drm_i915_file_private *file_priv,
               u32 ctx_user_handle)
{
        struct i915_gem_context *ctx;
        int ret;

        ret = i915_mutex_lock_interruptible(&dev_priv->drm);
        if (ret)
                return ERR_PTR(ret);

        ctx = i915_gem_context_lookup(file_priv, ctx_user_handle);
        if (!IS_ERR(ctx))
                i915_gem_context_get(ctx);

        mutex_unlock(&dev_priv->drm.struct_mutex);

        return ctx;
}

static int
i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
                            struct drm_i915_perf_open_param *param,
                            struct perf_open_properties *props,
                            struct drm_file *file)
{
        struct i915_gem_context *specific_ctx = NULL;
        struct i915_perf_stream *stream = NULL;
        unsigned long f_flags = 0;
        int stream_fd;
        int ret;

        if (props->single_context) {
                u32 ctx_handle = props->ctx_handle;
                struct drm_i915_file_private *file_priv = file->driver_priv;

                specific_ctx = lookup_context(dev_priv, file_priv, ctx_handle);
                if (IS_ERR(specific_ctx)) {
                        ret = PTR_ERR(specific_ctx);
                        if (ret != -EINTR)
                                DRM_ERROR("Failed to look up context with ID %u for opening perf stream\n",
                                          ctx_handle);
                        goto err;
                }
        }

        /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
         * we check a dev.i915.perf_stream_paranoid sysctl option
         * to determine if it's ok to access system wide OA counters
         * without CAP_SYS_ADMIN privileges.
         */
        if (!specific_ctx &&
            i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
                DRM_ERROR("Insufficient privileges to open system-wide i915 perf stream\n");
                ret = -EACCES;
                goto err_ctx;
        }

        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (!stream) {
                ret = -ENOMEM;
                goto err_ctx;
        }

        stream->dev_priv = dev_priv;
        stream->ctx = specific_ctx;

        ret = i915_oa_stream_init(stream, param, props);
        if (ret)
                goto err_alloc;

        /* we avoid simply assigning stream->sample_flags = props->sample_flags
         * to have _stream_init check the combination of sample flags more
         * thoroughly, but still this is the expected result at this point.
         */
        if (WARN_ON(stream->sample_flags != props->sample_flags)) {
                ret = -ENODEV;
                goto err_alloc;
        }

        list_add(&stream->link, &dev_priv->perf.streams);

        if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
                f_flags |= O_CLOEXEC;
        if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
                f_flags |= O_NONBLOCK;

        stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
        if (stream_fd < 0) {
                ret = stream_fd;
                goto err_open;
        }

        if (!(param->flags & I915_PERF_FLAG_DISABLED))
                i915_perf_enable_locked(stream);

        return stream_fd;

err_open:
        list_del(&stream->link);
        if (stream->ops->destroy)
                stream->ops->destroy(stream);
err_alloc:
        kfree(stream);
err_ctx:
        if (specific_ctx) {
                mutex_lock(&dev_priv->drm.struct_mutex);
                i915_gem_context_put(specific_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }
err:
        return ret;
}
/* Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_sem.
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 */
static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                                    u64 __user *uprops,
                                    u32 n_props,
                                    struct perf_open_properties *props)
{
        u64 __user *uprop = uprops;
        int i;

        memset(props, 0, sizeof(struct perf_open_properties));

        if (!n_props) {
                DRM_ERROR("No i915 perf properties given\n");
                return -EINVAL;
        }

        /* Considering that ID = 0 is reserved and assuming that we don't
         * (currently) expect any configurations to ever specify duplicate
         * values for a particular property ID then the last _PROP_MAX value is
         * one greater than the maximum number of properties we expect to get
         * from userspace.
         */
        if (n_props >= DRM_I915_PERF_PROP_MAX) {
                DRM_ERROR("More i915 perf properties specified than exist\n");
                return -EINVAL;
        }

        for (i = 0; i < n_props; i++) {
                u64 oa_period, oa_freq_hz;
                u64 id, value;
                int ret;

                ret = get_user(id, uprop);
                if (ret)
                        return ret;

                ret = get_user(value, uprop + 1);
                if (ret)
                        return ret;

                switch ((enum drm_i915_perf_property_id)id) {
                case DRM_I915_PERF_PROP_CTX_HANDLE:
                        props->single_context = 1;
                        props->ctx_handle = value;
                        break;
                case DRM_I915_PERF_PROP_SAMPLE_OA:
                        props->sample_flags |= SAMPLE_OA_REPORT;
                        break;
                case DRM_I915_PERF_PROP_OA_METRICS_SET:
                        if (value == 0 ||
                            value > dev_priv->perf.oa.n_builtin_sets) {
                                DRM_ERROR("Unknown OA metric set ID\n");
                                return -EINVAL;
                        }
                        props->metrics_set = value;
                        break;
                case DRM_I915_PERF_PROP_OA_FORMAT:
                        if (value == 0 || value >= I915_OA_FORMAT_MAX) {
                                DRM_ERROR("Out-of-range OA report format\n");
                                return -EINVAL;
                        }
                        if (!dev_priv->perf.oa.oa_formats[value].size) {
                                DRM_ERROR("Unsupported OA report format\n");
                                return -EINVAL;
                        }
                        props->oa_format = value;
                        break;
                case DRM_I915_PERF_PROP_OA_EXPONENT:
                        if (value > OA_EXPONENT_MAX) {
                                DRM_ERROR("OA timer exponent too high (> %u)\n",
                                          OA_EXPONENT_MAX);
                                return -EINVAL;
                        }

                        /* Theoretically we can program the OA unit to sample
                         * every 160ns but don't allow that by default unless
                         * root.
                         *
                         * On Haswell the period is derived from the exponent
                         * as:
                         *
                         *   period = 80ns * 2^(exponent + 1)
                         *
                         * E.g. an exponent of 6 gives a period of
                         * 80ns * 2^7 = 10.24us, i.e. a sample rate of
                         * ~97.7KHz.
                         */
                        BUILD_BUG_ON(sizeof(oa_period) != 8);
                        oa_period = 80ull * (2ull << value);

                        /* This check is primarily to ensure that oa_period <=
                         * UINT32_MAX (before passing to do_div which only
                         * accepts a u32 denominator), but we can also skip
                         * checking anything < 1Hz which implicitly can't be
                         * limited via an integer oa_max_sample_rate.
                         */
                        if (oa_period <= NSEC_PER_SEC) {
                                u64 tmp = NSEC_PER_SEC;

                                do_div(tmp, oa_period);
                                oa_freq_hz = tmp;
                        } else {
                                oa_freq_hz = 0;
                        }

                        if (oa_freq_hz > i915_oa_max_sample_rate &&
                            !capable(CAP_SYS_ADMIN)) {
                                DRM_ERROR("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
                                          i915_oa_max_sample_rate);
                                return -EACCES;
                        }

                        props->oa_periodic = true;
                        props->oa_period_exponent = value;
                        break;
                default:
                        MISSING_CASE(id);
                        DRM_ERROR("Unknown i915 perf property ID\n");
                        return -EINVAL;
                }

                uprop += 2;
        }

        return 0;
}
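
/*
 * Editor's note: read_properties_unlocked() expects properties_ptr to point
 * at a flat array of (id, value) u64 pairs, consumed two at a time via the
 * uprop += 2 stride above. A minimal sketch of how userspace might encode
 * the properties for a periodically sampled, system-wide OA stream (the
 * metrics set ID 1 and exponent 16 are illustrative values only):
 *
 *      #include <stdint.h>
 *      #include <i915_drm.h>
 *
 *      uint64_t properties[] = {
 *              DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *              DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *              DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
 *              DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *      };
 *
 * Note that num_properties counts pairs, so it would be 4 here, not 8.
 */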
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_perf_open_param *param = data;
        struct perf_open_properties props;
        u32 known_open_flags;
        int ret;

        if (!dev_priv->perf.initialized) {
                DRM_ERROR("i915 perf interface not available for this system\n");
                return -ENOTSUPP;
        }

        known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
                           I915_PERF_FLAG_FD_NONBLOCK |
                           I915_PERF_FLAG_DISABLED;
        if (param->flags & ~known_open_flags) {
                DRM_ERROR("Unknown drm_i915_perf_open_param flag\n");
                return -EINVAL;
        }

        ret = read_properties_unlocked(dev_priv,
                                       u64_to_user_ptr(param->properties_ptr),
                                       param->num_properties,
                                       &props);
        if (ret)
                return ret;

        mutex_lock(&dev_priv->perf.lock);
        ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
        mutex_unlock(&dev_priv->perf.lock);

        return ret;
}
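
/*
 * Editor's note: a minimal sketch of opening a stream via this ioctl from
 * userspace, reusing the hypothetical properties[] array sketched above;
 * drm_fd is assumed to be an open DRM device fd and error handling is
 * elided:
 *
 *      #include <stdint.h>
 *      #include <sys/ioctl.h>
 *      #include <i915_drm.h>
 *
 *      struct drm_i915_perf_open_param param = {
 *              .flags = I915_PERF_FLAG_FD_CLOEXEC |
 *                       I915_PERF_FLAG_DISABLED,
 *              .num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *              .properties_ptr = (uintptr_t)properties,
 *      };
 *      int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * With I915_PERF_FLAG_DISABLED set, no reports are collected until the
 * stream is explicitly started with I915_PERF_IOCTL_ENABLE.
 */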
void i915_perf_register(struct drm_i915_private *dev_priv)
{
        if (!IS_HASWELL(dev_priv))
                return;

        if (!dev_priv->perf.initialized)
                return;

        /* Hold the perf lock to be sure we're synchronized with any
         * attempted i915_perf_open_ioctl(), considering that we register
         * after being exposed to userspace.
         */
        mutex_lock(&dev_priv->perf.lock);

        dev_priv->perf.metrics_kobj =
                kobject_create_and_add("metrics",
                                       &dev_priv->drm.primary->kdev->kobj);
        if (!dev_priv->perf.metrics_kobj)
                goto exit;

        if (i915_perf_register_sysfs_hsw(dev_priv)) {
                kobject_put(dev_priv->perf.metrics_kobj);
                dev_priv->perf.metrics_kobj = NULL;
        }

exit:
        mutex_unlock(&dev_priv->perf.lock);
}
void i915_perf_unregister(struct drm_i915_private *dev_priv)
{
        if (!IS_HASWELL(dev_priv))
                return;

        if (!dev_priv->perf.metrics_kobj)
                return;

        i915_perf_unregister_sysfs_hsw(dev_priv);

        kobject_put(dev_priv->perf.metrics_kobj);
        dev_priv->perf.metrics_kobj = NULL;
}
static struct ctl_table oa_table[] = {
        {
                .procname = "perf_stream_paranoid",
                .data = &i915_perf_stream_paranoid,
                .maxlen = sizeof(i915_perf_stream_paranoid),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
                .extra2 = &one,
        },
        {
                .procname = "oa_max_sample_rate",
                .data = &i915_oa_max_sample_rate,
                .maxlen = sizeof(i915_oa_max_sample_rate),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
                .extra2 = &oa_sample_rate_hard_limit,
        },
        {}
};

static struct ctl_table i915_root[] = {
        {
                .procname = "i915",
                .maxlen = 0,
                .mode = 0555,
                .child = oa_table,
        },
        {}
};

static struct ctl_table dev_root[] = {
        {
                .procname = "dev",
                .maxlen = 0,
                .mode = 0555,
                .child = i915_root,
        },
        {}
};
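
/*
 * Editor's note: with the dev -> i915 -> oa_table nesting above, registering
 * dev_root (in i915_perf_init() below) exposes the two knobs as
 * /proc/sys/dev/i915/perf_stream_paranoid and
 * /proc/sys/dev/i915/oa_max_sample_rate. Since both entries use
 * proc_dointvec_minmax, writes outside the [extra1, extra2] range are
 * rejected, so perf_stream_paranoid only accepts 0 or 1 and
 * oa_max_sample_rate is bounded by oa_sample_rate_hard_limit.
 */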
void i915_perf_init(struct drm_i915_private *dev_priv)
{
        if (!IS_HASWELL(dev_priv))
                return;

        hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
                     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
        init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

        INIT_LIST_HEAD(&dev_priv->perf.streams);
        mutex_init(&dev_priv->perf.lock);
        spin_lock_init(&dev_priv->perf.hook_lock);

        dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
        dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
        dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
        dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
        dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
        dev_priv->perf.oa.ops.read = gen7_oa_read;
        dev_priv->perf.oa.ops.oa_buffer_is_empty =
                gen7_oa_buffer_is_empty_fop_unlocked;

        /* 12.5MHz is the Haswell timestamp frequency, i.e. one tick every
         * 80ns, matching the 80ns base of the OA period derivation above.
         */
        dev_priv->perf.oa.timestamp_frequency = 12500000;

        dev_priv->perf.oa.oa_formats = hsw_oa_formats;

        dev_priv->perf.oa.n_builtin_sets =
                i915_oa_n_builtin_metric_sets_hsw;

        dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

        dev_priv->perf.initialized = true;
}
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->perf.initialized)
                return;

        unregister_sysctl_table(dev_priv->perf.sysctl_header);

        memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
        dev_priv->perf.initialized = false;
}