/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int             (*is_in_guest)(void);
        int             (*is_user_mode)(void);
        unsigned long   (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};
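
/*
 * Illustrative sketch (not part of the original header): consuming a
 * branch stack delivered with a sample, following the layout described
 * above.  Assumes @br points at the perf_branch_stack obtained from
 * perf_sample_data::br_stack; process_branch() is a hypothetical consumer.
 *
 *      u64 i;
 *
 *      for (i = 0; i < br->nr; i++) {
 *              // entries[0] is the most recent taken branch
 *              process_branch(br->entries[i].from, br->entries[i].to);
 *      }
 */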

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64             config; /* register value */
        unsigned int    reg;    /* register address or index */
        int             alloc;  /* extra register already allocated */
        int             idx;    /* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             event_base_rdpmc;
                        int             idx;
                        int             last_cpu;
                        int             flags;

                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
                struct { /* tracepoint */
                        /* for tp_event->class */
                        struct list_head        tp_list;
                };
                struct { /* intel_cqm */
                        int                     cqm_state;
                        u32                     cqm_rmid;
                        struct list_head        cqm_events_entry;
                        struct list_head        cqm_groups_entry;
                        struct list_head        cqm_group_entry;
                };
                struct { /* itrace */
                        int                     itrace_started;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                };
#endif
        };
        /*
         * If the event is a per task event, this will point to the task in
         * question. See the comment in perf_event_alloc().
         */
        struct task_struct              *target;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

        int                             state;

        /*
         * The last observed hardware counter value, updated with a
         * local64_cmpxchg() such that pmu::read() can be called nested.
         */
        local64_t                       prev_count;

        /*
         * The period to start the next sample with.
         */
        u64                             sample_period;

        /*
         * The period we started this sample with.
         */
        u64                             last_period;

        /*
         * However much is left of the current period; note that this is
         * a full 64bit value and allows for generation of periods longer
         * than hardware might allow.
         */
        local64_t                       period_left;

        /*
         * State for throttling the event, see __perf_event_overflow() and
         * perf_adjust_freq_unthr_context().
         */
        u64                             interrupts_seq;
        u64                             interrupts;

        /*
         * State for freq target events, see __perf_event_overflow() and
         * perf_adjust_freq_unthr_context().
         */
        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD        0x1     /* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ       0x2     /* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT       0x01
#define PERF_PMU_CAP_NO_NMI             0x02
#define PERF_PMU_CAP_AUX_NO_SG          0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF   0x08
#define PERF_PMU_CAP_EXCLUSIVE          0x10
#define PERF_PMU_CAP_ITRACE             0x20

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        struct module                   *module;
        struct device                   *dev;
        const struct attribute_group    **attr_groups;
        const char                      *name;
        int                             type;

        /*
         * various common per-pmu feature flags
         */
        int                             capabilities;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        atomic_t                        exclusive_cnt; /* < 0: cpu; > 0: tsk */
        int                             task_ctx_nr;
        int                             hrtimer_interval_ms;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         *
         * Returns:
         *  -ENOENT     -- @event is not for this PMU
         *
         *  -ENODEV     -- @event is for this PMU but PMU not present
         *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
         *  -EINVAL     -- @event is for this PMU but @event is not valid
         *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
         *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
         *
         *  0           -- @event is for this PMU and valid
         *
         * Other error return values are allowed.
         */
        int (*event_init)               (struct perf_event *event);
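
        /*
         * Illustrative sketch (not from the original header): the usual
         * ->event_init() shape for a driver-defined PMU.  "my_pmu_event_init"
         * and MY_PMU_MAX_CONFIG are hypothetical names.
         *
         *      static int my_pmu_event_init(struct perf_event *event)
         *      {
         *              if (event->attr.type != event->pmu->type)
         *                      return -ENOENT;  // not ours; core tries the next PMU
         *              if (event->attr.config > MY_PMU_MAX_CONFIG)
         *                      return -EINVAL;  // ours, but the config is nonsense
         *              return 0;
         *      }
         */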

        /*
         * Notification that the event was mapped or unmapped. Called
         * in the context of the mapping task.
         */
        void (*event_mapped)            (struct perf_event *event); /* optional */
        void (*event_unmapped)          (struct perf_event *event); /* optional */

        /*
         * Flags for ->add()/->del()/->start()/->stop(). There are
         * matching hw_perf_event::state flags.
         */
#define PERF_EF_START   0x01 /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04 /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU, can be done inside a
         * transaction, see the ->*_txn() methods.
         *
         * The add/del callbacks will reserve all hardware resources required
         * to service the event, this includes any counter constraint
         * scheduling etc.
         *
         * Called with IRQs disabled and the PMU disabled on the CPU the event
         * is on.
         *
         * ->add() called without PERF_EF_START should result in the same state
         * as ->add() followed by ->stop().
         *
         * ->del() must always PERF_EF_UPDATE stop an event. If it calls
         * ->stop() that must deal with already being stopped without
         * PERF_EF_UPDATE.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU.
         *
         * The PMI handler should stop the counter when perf_event_overflow()
         * returns !0. ->start() will be used to continue.
         *
         * Also used to change the sample period.
         *
         * Called with IRQs disabled and the PMU disabled on the CPU the event
         * is on -- will be called from NMI context when the PMU generates
         * NMIs.
         *
         * ->stop() with PERF_EF_UPDATE will read the counter and update
         *  period/count values like ->read() would.
         *
         * ->start() with PERF_EF_RELOAD will reprogram the counter
         *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);
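
        /*
         * Illustrative sketch (not from the original header): how core code
         * typically reprograms a sample period while honouring the contract
         * above -- stop with PERF_EF_UPDATE first, then restart with
         * PERF_EF_RELOAD.
         *
         *      event->pmu->stop(event, PERF_EF_UPDATE);  // fold hw count into period_left
         *      local64_set(&event->hw.period_left, 0);
         *      event->pmu->start(event, PERF_EF_RELOAD); // reprogram from the new period
         */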

        /*
         * Updates the counter value of the event.
         *
         * For sampling capable PMUs this will also update the software period
         * hw_perf_event::period_left field.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group events scheduling is treated as a transaction: add
         * group events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction, after this ->add() doesn't need to
         * do schedulability tests.
         *
         * Optional.
         */
        void (*start_txn)               (struct pmu *pmu, unsigned int txn_flags);
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         *
         * Optional.
         */
        int  (*commit_txn)              (struct pmu *pmu);
        /*
         * Will cancel the transaction, assumes ->del() is called
         * for each successful ->add() during the transaction.
         *
         * Optional.
         */
        void (*cancel_txn)              (struct pmu *pmu);
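
        /*
         * Illustrative sketch (not from the original header): the call
         * sequence the core roughly follows when scheduling an event group
         * as one transaction; "for_each_sibling" stands in for the real
         * sibling-list iteration.
         *
         *      pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
         *      if (pmu->add(leader, PERF_EF_START))
         *              goto fail;
         *      for_each_sibling(sibling, leader)
         *              if (pmu->add(sibling, PERF_EF_START))
         *                      goto fail;
         *      if (!pmu->commit_txn(pmu))
         *              return 0;                       // whole group scheduled
         * fail:
         *      // ->del() each successfully added event, then:
         *      pmu->cancel_txn(pmu);
         */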

        /*
         * Will return the value for perf_event_mmap_page::index for this event,
         * if no implementation is provided it will default to: event->hw.idx + 1.
         */
        int (*event_idx)                (struct perf_event *event); /* optional */

        /*
         * context-switches callback
         */
        void (*sched_task)              (struct perf_event_context *ctx,
                                         bool sched_in);
        /*
         * PMU specific data size
         */
        size_t                          task_ctx_size;

        /*
         * Return the count value for a counter.
         */
        u64 (*count)                    (struct perf_event *event); /* optional */

        /*
         * Set up pmu-private data structures for an AUX area
         */
        void *(*setup_aux)              (int cpu, void **pages,
                                         int nr_pages, bool overwrite);
                                        /* optional */

        /*
         * Free pmu-private AUX data structures
         */
        void (*free_aux)                (void *aux); /* optional */

        /*
         * Filter events for PMU-specific reasons.
         */
        int (*filter_match)             (struct perf_event *event); /* optional */
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_DEAD           = -4,
        PERF_EVENT_STATE_EXIT           = -3,
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE             = 0x1,
};

#define SWEVENT_HLIST_BITS              8
#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
        struct rcu_head                 rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04
#define PERF_ATTACH_TASK_DATA   0x08

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        /*
         * entry onto perf_event_context::event_list;
         *   modifications require ctx->lock
         *   RCU safe iterations.
         */
        struct list_head                event_entry;

        /*
         * XXX: group_entry and sibling_list should be mutually exclusive;
         * either you're a sibling in a group, or you're the group leader.
         * Rework the code to always use the same list element.
         *
         * Locked for modification by both ctx->mutex and ctx->lock; holding
         * either suffices for read.
         */
        struct list_head                group_entry;
        struct list_head                sibling_list;

        /*
         * We need storage to track the entries in perf_pmu_migrate_context; we
         * cannot use the event_entry because of RCU and we want to keep the
         * group intact, which avoids us using the other two entries.
         */
        struct list_head                migrate_entry;

        struct hlist_node               hlist_entry;
        struct list_head                active_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64                             shadow_ctx_time;

        struct perf_event_attr          attr;
        u16                             header_size;
        u16                             id_header_size;
        u16                             read_size;
        struct hw_perf_event            hw;

        struct perf_event_context      *ctx;
        atomic_long_t                   refcount;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;

        struct ring_buffer              *rb;
        struct list_head                rb_entry;
        unsigned long                   rcu_batches;
        int                             rcu_pending;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        u64                             (*clock)(void);
        perf_overflow_handler_t         overflow_handler;
        void                            *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
        struct trace_event_call         *tp_event;
        struct event_filter             *filter;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp; /* cgroup the event is attached to */
        int                             cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        struct pmu                      *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events. Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                active_ctx_list;
        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        int                             nr_freq;
        int                             rotate_disable;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        int                             nr_cgroups;      /* cgroup evts */
        void                            *task_ctx_data;  /* pmu specific data */
        struct rcu_head                 rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;

        raw_spinlock_t                  hrtimer_lock;
        struct hrtimer                  hrtimer;
        ktime_t                         hrtimer_interval;
        unsigned int                    hrtimer_active;

        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct ring_buffer              *rb;
        unsigned long                   wakeup;
        unsigned long                   size;
        union {
                void                    *addr;
                unsigned long           head;
        };
        int                             page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
        u64                             time;
        u64                             timestamp;
};

struct perf_cgroup {
        struct cgroup_subsys_state      css;
        struct perf_cgroup_info __percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
        return container_of(task_css_check(task, perf_event_cgrp_id,
                                           ctx ? lockdep_is_held(&ctx->lock)
                                               : true),
                            struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
                                   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
                                unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
                                unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                 int cpu,
                                 struct task_struct *task,
                                 perf_overflow_handler_t callback,
                                 void *context);
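
/*
 * Illustrative sketch (not from the original header): creating an
 * in-kernel counter for CPU cycles on the current CPU.
 * "my_overflow_handler" is a hypothetical perf_overflow_handler_t.
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_CPU_CYCLES,
 *              .size           = sizeof(attr),
 *              .sample_period  = 1000000,
 *      };
 *      struct perf_event *event;
 *
 *      event = perf_event_create_kernel_counter(&attr, smp_processor_id(),
 *                                               NULL, my_overflow_handler,
 *                                               NULL);
 *      if (IS_ERR(event))
 *              return PTR_ERR(event);
 */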

extern void perf_pmu_migrate_context(struct pmu *pmu,
                                     int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);

struct perf_sample_data {
        /*
         * Fields set by perf_sample_data_init(), grouped so as to
         * minimize the cachelines touched.
         */
        u64                             addr;
        struct perf_raw_record          *raw;
        struct perf_branch_stack        *br_stack;
        u64                             period;
        u64                             weight;
        u64                             txn;
        union perf_mem_data_src         data_src;

        /*
         * The other fields, optionally {set,used} by
         * perf_{prepare,output}_sample().
         */
        u64                             type;
        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        struct perf_callchain_entry     *callchain;

        /*
         * regs_user may point to task_pt_regs or to regs_user_copy, depending
         * on arch details.
         */
        struct perf_regs                regs_user;
        struct pt_regs                  regs_user_copy;

        struct perf_regs                regs_intr;
        u64                             stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
                     PERF_MEM_S(LVL, NA)  |\
                     PERF_MEM_S(SNOOP, NA)|\
                     PERF_MEM_S(LOCK, NA) |\
                     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw  = NULL;
        data->br_stack = NULL;
        data->period = period;
        data->weight = 0;
        data->data_src.val = PERF_MEM_NA;
        data->txn = 0;
}
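
/*
 * Illustrative sketch (not from the original header): the typical
 * overflow path in a PMU driver's interrupt handler, assuming @event
 * and @regs were obtained from the interrupt.
 *
 *      struct perf_sample_data data;
 *
 *      perf_sample_data_init(&data, 0, event->hw.last_period);
 *      if (perf_event_overflow(event, &data, regs))
 *              event->pmu->stop(event, 0);     // handler asked us to throttle
 */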

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs);

extern void perf_event_output(struct perf_event *event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs);

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        if (static_key_false(&perf_swevent_enabled[event_id]))
                __perf_sw_event(event_id, nr, regs, addr);
}
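
/*
 * Illustrative sketch (not from the original header): how core code
 * feeds a software event, e.g. when accounting a page fault.  The
 * static key keeps this a patched-out NOP until at least one such
 * event exists.
 *
 *      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */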

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
        if (static_key_false(&perf_swevent_enabled[event_id])) {
                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

                perf_fetch_caller_regs(regs);
                ___perf_sw_event(event_id, nr, regs, addr);
        }
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
        if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
                return true;
        return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
        if (perf_sw_migrate_enabled())
                task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_branch_unlikely(&perf_sched_events))
                __perf_event_task_sched_in(prev, task);

        if (perf_sw_migrate_enabled() && task->sched_migrated) {
                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

                perf_fetch_caller_regs(regs);
                ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
                task->sched_migrated = 0;
        }
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

        if (static_branch_unlikely(&perf_sched_events))
                __perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
        return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}
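
/*
 * Illustrative sketch (not from the original header): an architecture's
 * kernel callchain walker records frames roughly like so; the frame
 * walk itself is arch-specific and elided here.
 *
 *      void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *                                 struct pt_regs *regs)
 *      {
 *              perf_callchain_store(entry, instruction_pointer(regs));
 *              // ... walk frame pointers, storing each return address
 *      }
 */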

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                                    void __user *buffer, size_t *lenp,
                                    loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                                             void __user *buffer, size_t *lenp,
                                             loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
                          struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
        return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
        return event->pmu->setup_aux;
}

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
                                     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
                                     unsigned int len);
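
/*
 * Illustrative sketch (not from the original header): emitting a record
 * through the output API; "payload" is a hypothetical fixed-size struct.
 * perf_output_put() is defined further down in this header.
 *
 *      struct perf_output_handle handle;
 *      struct perf_event_header header = {
 *              .type = PERF_RECORD_SAMPLE,
 *              .size = sizeof(header) + sizeof(payload),
 *      };
 *
 *      if (perf_output_begin(&handle, event, header.size))
 *              return;                 // no space; the event gets a lost count
 *      perf_output_put(&handle, header);
 *      perf_output_put(&handle, payload);
 *      perf_output_end(&handle);
 */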

extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event)                         { return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
                    bool truncated)                                     { }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
                     unsigned long size)                                { return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)                         { return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)                       { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next)                     { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline struct file *perf_event_get(unsigned int fd)      { return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
        return ERR_PTR(-EINVAL);
}
static inline u64 perf_event_read_local(struct perf_event *event)       { return -EINVAL; }
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)                     { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_exec(void)                                { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)  { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline u64 perf_swevent_set_period(struct perf_event *event)     { return 0; }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline int __perf_event_disable(void *info)                      { return -1; }
static inline void perf_event_task_tick(void)                           { }
static inline int perf_event_release_kernel(struct perf_event *event)   { return 0; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)                       { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)                                           \
do {                                                                    \
        static struct notifier_block fn##_nb =                          \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        unsigned long cpu = smp_processor_id();                         \
        unsigned long flags;                                            \
                                                                        \
        cpu_notifier_register_begin();                                  \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
                (void *)(unsigned long)cpu);                            \
        local_irq_save(flags);                                          \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
                (void *)(unsigned long)cpu);                            \
        local_irq_restore(flags);                                       \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
                (void *)(unsigned long)cpu);                            \
        __register_cpu_notifier(&fn##_nb);                              \
        cpu_notifier_register_done();                                   \
} while (0)
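
/*
 * Illustrative sketch (not from the original header): a PMU driver wiring
 * up a hotplug callback; "my_pmu_cpu_notify" is a hypothetical name.
 *
 *      static int my_pmu_cpu_notify(struct notifier_block *nb,
 *                                   unsigned long action, void *hcpu)
 *      {
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_ONLINE:
 *                      // set up per-cpu PMU state
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      perf_cpu_notifier(my_pmu_cpu_notify);
 */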

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)                                         \
do {                                                                    \
        static struct notifier_block fn##_nb =                          \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
                                                                        \
        __register_cpu_notifier(&fn##_nb);                              \
} while (0)

struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
                              char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
static struct perf_pmu_events_attr _var = {                             \
        .attr = __ATTR(_name, 0444, _show, NULL),                       \
        .id   =  _id,                                                   \
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)                        \
static struct perf_pmu_events_attr _var = {                             \
        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
        .id             = 0,                                            \
        .event_str      = _str,                                         \
};

#define PMU_FORMAT_ATTR(_name, _format)                                 \
static ssize_t                                                          \
_name##_show(struct device *dev,                                        \
             struct device_attribute *attr,                             \
             char *page)                                                \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
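
/*
 * Illustrative sketch (not from the original header): exposing an event
 * and a config format through sysfs with the macros above; the names
 * and encodings are hypothetical.
 *
 *      PMU_EVENT_ATTR_STRING(cycles, my_pmu_attr_cycles, "event=0x3c");
 *      PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *      static struct attribute *my_pmu_events[] = {
 *              &my_pmu_attr_cycles.attr.attr,
 *              NULL,
 *      };
 */
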
#endif /* _LINUX_PERF_EVENT_H */