perf_event.h 26 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945
  1. /*
  2. * Performance events:
  3. *
  4. * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  7. *
  8. * Data type definitions, declarations, prototypes.
  9. *
  10. * Started by: Thomas Gleixner and Ingo Molnar
  11. *
  12. * For licencing details see kernel-base/COPYING
  13. */
  14. #ifndef _LINUX_PERF_EVENT_H
  15. #define _LINUX_PERF_EVENT_H
  16. #include <uapi/linux/perf_event.h>
  17. /*
  18. * Kernel-internal data types and definitions:
  19. */
  20. #ifdef CONFIG_PERF_EVENTS
  21. # include <asm/perf_event.h>
  22. # include <asm/local64.h>
  23. #endif
  24. struct perf_guest_info_callbacks {
  25. int (*is_in_guest)(void);
  26. int (*is_user_mode)(void);
  27. unsigned long (*get_guest_ip)(void);
  28. };
  29. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  30. #include <asm/hw_breakpoint.h>
  31. #endif
  32. #include <linux/list.h>
  33. #include <linux/mutex.h>
  34. #include <linux/rculist.h>
  35. #include <linux/rcupdate.h>
  36. #include <linux/spinlock.h>
  37. #include <linux/hrtimer.h>
  38. #include <linux/fs.h>
  39. #include <linux/pid_namespace.h>
  40. #include <linux/workqueue.h>
  41. #include <linux/ftrace.h>
  42. #include <linux/cpu.h>
  43. #include <linux/irq_work.h>
  44. #include <linux/static_key.h>
  45. #include <linux/jump_label_ratelimit.h>
  46. #include <linux/atomic.h>
  47. #include <linux/sysfs.h>
  48. #include <linux/perf_regs.h>
  49. #include <linux/workqueue.h>
  50. #include <asm/local.h>
  51. struct perf_callchain_entry {
  52. __u64 nr;
  53. __u64 ip[PERF_MAX_STACK_DEPTH];
  54. };
  55. struct perf_raw_record {
  56. u32 size;
  57. void *data;
  58. };
  59. /*
  60. * branch stack layout:
  61. * nr: number of taken branches stored in entries[]
  62. *
  63. * Note that nr can vary from sample to sample
  64. * branches (to, from) are stored from most recent
  65. * to least recent, i.e., entries[0] contains the most
  66. * recent branch.
  67. */
  68. struct perf_branch_stack {
  69. __u64 nr;
  70. struct perf_branch_entry entries[0];
  71. };
  72. struct task_struct;
  73. /*
  74. * extra PMU register associated with an event
  75. */
  76. struct hw_perf_event_extra {
  77. u64 config; /* register value */
  78. unsigned int reg; /* register address or index */
  79. int alloc; /* extra register already allocated */
  80. int idx; /* index in shared_regs->regs[] */
  81. };
  82. struct event_constraint;
  83. /**
  84. * struct hw_perf_event - performance event hardware details:
  85. */
  86. struct hw_perf_event {
  87. #ifdef CONFIG_PERF_EVENTS
  88. union {
  89. struct { /* hardware */
  90. u64 config;
  91. u64 last_tag;
  92. unsigned long config_base;
  93. unsigned long event_base;
  94. int event_base_rdpmc;
  95. int idx;
  96. int last_cpu;
  97. int flags;
  98. struct hw_perf_event_extra extra_reg;
  99. struct hw_perf_event_extra branch_reg;
  100. struct event_constraint *constraint;
  101. };
  102. struct { /* software */
  103. struct hrtimer hrtimer;
  104. };
  105. struct { /* tracepoint */
  106. struct task_struct *tp_target;
  107. /* for tp_event->class */
  108. struct list_head tp_list;
  109. };
  110. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  111. struct { /* breakpoint */
  112. /*
  113. * Crufty hack to avoid the chicken and egg
  114. * problem hw_breakpoint has with context
  115. * creation and event initalization.
  116. */
  117. struct task_struct *bp_target;
  118. struct arch_hw_breakpoint info;
  119. struct list_head bp_list;
  120. };
  121. #endif
  122. };
  123. int state;
  124. local64_t prev_count;
  125. u64 sample_period;
  126. u64 last_period;
  127. local64_t period_left;
  128. u64 interrupts_seq;
  129. u64 interrupts;
  130. u64 freq_time_stamp;
  131. u64 freq_count_stamp;
  132. #endif
  133. };
  134. /*
  135. * hw_perf_event::state flags
  136. */
  137. #define PERF_HES_STOPPED 0x01 /* the counter is stopped */
  138. #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
  139. #define PERF_HES_ARCH 0x04
  140. struct perf_event;
  141. /*
  142. * Common implementation detail of pmu::{start,commit,cancel}_txn
  143. */
  144. #define PERF_EVENT_TXN 0x1
  145. /**
  146. * pmu::capabilities flags
  147. */
  148. #define PERF_PMU_CAP_NO_INTERRUPT 0x01
  149. /**
  150. * struct pmu - generic performance monitoring unit
  151. */
  152. struct pmu {
  153. struct list_head entry;
  154. struct module *module;
  155. struct device *dev;
  156. const struct attribute_group **attr_groups;
  157. const char *name;
  158. int type;
  159. /*
  160. * various common per-pmu feature flags
  161. */
  162. int capabilities;
  163. int * __percpu pmu_disable_count;
  164. struct perf_cpu_context * __percpu pmu_cpu_context;
  165. int task_ctx_nr;
  166. int hrtimer_interval_ms;
  167. /*
  168. * Fully disable/enable this PMU, can be used to protect from the PMI
  169. * as well as for lazy/batch writing of the MSRs.
  170. */
  171. void (*pmu_enable) (struct pmu *pmu); /* optional */
  172. void (*pmu_disable) (struct pmu *pmu); /* optional */
  173. /*
  174. * Try and initialize the event for this PMU.
  175. * Should return -ENOENT when the @event doesn't match this PMU.
  176. */
  177. int (*event_init) (struct perf_event *event);
  178. /*
  179. * Notification that the event was mapped or unmapped. Called
  180. * in the context of the mapping task.
  181. */
  182. void (*event_mapped) (struct perf_event *event); /*optional*/
  183. void (*event_unmapped) (struct perf_event *event); /*optional*/
  184. #define PERF_EF_START 0x01 /* start the counter when adding */
  185. #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
  186. #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
  187. /*
  188. * Adds/Removes a counter to/from the PMU, can be done inside
  189. * a transaction, see the ->*_txn() methods.
  190. */
  191. int (*add) (struct perf_event *event, int flags);
  192. void (*del) (struct perf_event *event, int flags);
  193. /*
  194. * Starts/Stops a counter present on the PMU. The PMI handler
  195. * should stop the counter when perf_event_overflow() returns
  196. * !0. ->start() will be used to continue.
  197. */
  198. void (*start) (struct perf_event *event, int flags);
  199. void (*stop) (struct perf_event *event, int flags);
  200. /*
  201. * Updates the counter value of the event.
  202. */
  203. void (*read) (struct perf_event *event);
  204. /*
  205. * Group events scheduling is treated as a transaction, add
  206. * group events as a whole and perform one schedulability test.
  207. * If the test fails, roll back the whole group
  208. *
  209. * Start the transaction, after this ->add() doesn't need to
  210. * do schedulability tests.
  211. */
  212. void (*start_txn) (struct pmu *pmu); /* optional */
  213. /*
  214. * If ->start_txn() disabled the ->add() schedulability test
  215. * then ->commit_txn() is required to perform one. On success
  216. * the transaction is closed. On error the transaction is kept
  217. * open until ->cancel_txn() is called.
  218. */
  219. int (*commit_txn) (struct pmu *pmu); /* optional */
  220. /*
  221. * Will cancel the transaction, assumes ->del() is called
  222. * for each successful ->add() during the transaction.
  223. */
  224. void (*cancel_txn) (struct pmu *pmu); /* optional */
  225. /*
  226. * Will return the value for perf_event_mmap_page::index for this event,
  227. * if no implementation is provided it will default to: event->hw.idx + 1.
  228. */
  229. int (*event_idx) (struct perf_event *event); /*optional */
  230. /*
  231. * flush branch stack on context-switches (needed in cpu-wide mode)
  232. */
  233. void (*flush_branch_stack) (void);
  234. };
  235. /**
  236. * enum perf_event_active_state - the states of a event
  237. */
  238. enum perf_event_active_state {
  239. PERF_EVENT_STATE_EXIT = -3,
  240. PERF_EVENT_STATE_ERROR = -2,
  241. PERF_EVENT_STATE_OFF = -1,
  242. PERF_EVENT_STATE_INACTIVE = 0,
  243. PERF_EVENT_STATE_ACTIVE = 1,
  244. };
  245. struct file;
  246. struct perf_sample_data;
  247. typedef void (*perf_overflow_handler_t)(struct perf_event *,
  248. struct perf_sample_data *,
  249. struct pt_regs *regs);
  250. enum perf_group_flag {
  251. PERF_GROUP_SOFTWARE = 0x1,
  252. };
  253. #define SWEVENT_HLIST_BITS 8
  254. #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
  255. struct swevent_hlist {
  256. struct hlist_head heads[SWEVENT_HLIST_SIZE];
  257. struct rcu_head rcu_head;
  258. };
  259. #define PERF_ATTACH_CONTEXT 0x01
  260. #define PERF_ATTACH_GROUP 0x02
  261. #define PERF_ATTACH_TASK 0x04
  262. struct perf_cgroup;
  263. struct ring_buffer;
  264. /**
  265. * struct perf_event - performance event kernel representation:
  266. */
  267. struct perf_event {
  268. #ifdef CONFIG_PERF_EVENTS
  269. /*
  270. * entry onto perf_event_context::event_list;
  271. * modifications require ctx->lock
  272. * RCU safe iterations.
  273. */
  274. struct list_head event_entry;
  275. /*
  276. * XXX: group_entry and sibling_list should be mutually exclusive;
  277. * either you're a sibling on a group, or you're the group leader.
  278. * Rework the code to always use the same list element.
  279. *
  280. * Locked for modification by both ctx->mutex and ctx->lock; holding
  281. * either sufficies for read.
  282. */
  283. struct list_head group_entry;
  284. struct list_head sibling_list;
  285. /*
  286. * We need storage to track the entries in perf_pmu_migrate_context; we
  287. * cannot use the event_entry because of RCU and we want to keep the
  288. * group in tact which avoids us using the other two entries.
  289. */
  290. struct list_head migrate_entry;
  291. struct hlist_node hlist_entry;
  292. struct list_head active_entry;
  293. int nr_siblings;
  294. int group_flags;
  295. struct perf_event *group_leader;
  296. struct pmu *pmu;
  297. enum perf_event_active_state state;
  298. unsigned int attach_state;
  299. local64_t count;
  300. atomic64_t child_count;
  301. /*
  302. * These are the total time in nanoseconds that the event
  303. * has been enabled (i.e. eligible to run, and the task has
  304. * been scheduled in, if this is a per-task event)
  305. * and running (scheduled onto the CPU), respectively.
  306. *
  307. * They are computed from tstamp_enabled, tstamp_running and
  308. * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
  309. */
  310. u64 total_time_enabled;
  311. u64 total_time_running;
  312. /*
  313. * These are timestamps used for computing total_time_enabled
  314. * and total_time_running when the event is in INACTIVE or
  315. * ACTIVE state, measured in nanoseconds from an arbitrary point
  316. * in time.
  317. * tstamp_enabled: the notional time when the event was enabled
  318. * tstamp_running: the notional time when the event was scheduled on
  319. * tstamp_stopped: in INACTIVE state, the notional time when the
  320. * event was scheduled off.
  321. */
  322. u64 tstamp_enabled;
  323. u64 tstamp_running;
  324. u64 tstamp_stopped;
  325. /*
  326. * timestamp shadows the actual context timing but it can
  327. * be safely used in NMI interrupt context. It reflects the
  328. * context time as it was when the event was last scheduled in.
  329. *
  330. * ctx_time already accounts for ctx->timestamp. Therefore to
  331. * compute ctx_time for a sample, simply add perf_clock().
  332. */
  333. u64 shadow_ctx_time;
  334. struct perf_event_attr attr;
  335. u16 header_size;
  336. u16 id_header_size;
  337. u16 read_size;
  338. struct hw_perf_event hw;
  339. struct perf_event_context *ctx;
  340. atomic_long_t refcount;
  341. /*
  342. * These accumulate total time (in nanoseconds) that children
  343. * events have been enabled and running, respectively.
  344. */
  345. atomic64_t child_total_time_enabled;
  346. atomic64_t child_total_time_running;
  347. /*
  348. * Protect attach/detach and child_list:
  349. */
  350. struct mutex child_mutex;
  351. struct list_head child_list;
  352. struct perf_event *parent;
  353. int oncpu;
  354. int cpu;
  355. struct list_head owner_entry;
  356. struct task_struct *owner;
  357. /* mmap bits */
  358. struct mutex mmap_mutex;
  359. atomic_t mmap_count;
  360. struct ring_buffer *rb;
  361. struct list_head rb_entry;
  362. unsigned long rcu_batches;
  363. int rcu_pending;
  364. /* poll related */
  365. wait_queue_head_t waitq;
  366. struct fasync_struct *fasync;
  367. /* delayed work for NMIs and such */
  368. int pending_wakeup;
  369. int pending_kill;
  370. int pending_disable;
  371. struct irq_work pending;
  372. atomic_t event_limit;
  373. void (*destroy)(struct perf_event *);
  374. struct rcu_head rcu_head;
  375. struct pid_namespace *ns;
  376. u64 id;
  377. perf_overflow_handler_t overflow_handler;
  378. void *overflow_handler_context;
  379. #ifdef CONFIG_EVENT_TRACING
  380. struct ftrace_event_call *tp_event;
  381. struct event_filter *filter;
  382. #ifdef CONFIG_FUNCTION_TRACER
  383. struct ftrace_ops ftrace_ops;
  384. #endif
  385. #endif
  386. #ifdef CONFIG_CGROUP_PERF
  387. struct perf_cgroup *cgrp; /* cgroup event is attach to */
  388. int cgrp_defer_enabled;
  389. #endif
  390. #endif /* CONFIG_PERF_EVENTS */
  391. };
  392. /**
  393. * struct perf_event_context - event context structure
  394. *
  395. * Used as a container for task events and CPU events as well:
  396. */
  397. struct perf_event_context {
  398. struct pmu *pmu;
  399. /*
  400. * Protect the states of the events in the list,
  401. * nr_active, and the list:
  402. */
  403. raw_spinlock_t lock;
  404. /*
  405. * Protect the list of events. Locking either mutex or lock
  406. * is sufficient to ensure the list doesn't change; to change
  407. * the list you need to lock both the mutex and the spinlock.
  408. */
  409. struct mutex mutex;
  410. struct list_head active_ctx_list;
  411. struct list_head pinned_groups;
  412. struct list_head flexible_groups;
  413. struct list_head event_list;
  414. int nr_events;
  415. int nr_active;
  416. int is_active;
  417. int nr_stat;
  418. int nr_freq;
  419. int rotate_disable;
  420. atomic_t refcount;
  421. struct task_struct *task;
  422. /*
  423. * Context clock, runs when context enabled.
  424. */
  425. u64 time;
  426. u64 timestamp;
  427. /*
  428. * These fields let us detect when two contexts have both
  429. * been cloned (inherited) from a common ancestor.
  430. */
  431. struct perf_event_context *parent_ctx;
  432. u64 parent_gen;
  433. u64 generation;
  434. int pin_count;
  435. int nr_cgroups; /* cgroup evts */
  436. int nr_branch_stack; /* branch_stack evt */
  437. struct rcu_head rcu_head;
  438. struct delayed_work orphans_remove;
  439. bool orphans_remove_sched;
  440. };
  441. /*
  442. * Number of contexts where an event can trigger:
  443. * task, softirq, hardirq, nmi.
  444. */
  445. #define PERF_NR_CONTEXTS 4
  446. /**
  447. * struct perf_event_cpu_context - per cpu event context structure
  448. */
  449. struct perf_cpu_context {
  450. struct perf_event_context ctx;
  451. struct perf_event_context *task_ctx;
  452. int active_oncpu;
  453. int exclusive;
  454. struct hrtimer hrtimer;
  455. ktime_t hrtimer_interval;
  456. struct pmu *unique_pmu;
  457. struct perf_cgroup *cgrp;
  458. };
  459. struct perf_output_handle {
  460. struct perf_event *event;
  461. struct ring_buffer *rb;
  462. unsigned long wakeup;
  463. unsigned long size;
  464. void *addr;
  465. int page;
  466. };
  467. #ifdef CONFIG_PERF_EVENTS
  468. extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
  469. extern void perf_pmu_unregister(struct pmu *pmu);
  470. extern int perf_num_counters(void);
  471. extern const char *perf_pmu_name(void);
  472. extern void __perf_event_task_sched_in(struct task_struct *prev,
  473. struct task_struct *task);
  474. extern void __perf_event_task_sched_out(struct task_struct *prev,
  475. struct task_struct *next);
  476. extern int perf_event_init_task(struct task_struct *child);
  477. extern void perf_event_exit_task(struct task_struct *child);
  478. extern void perf_event_free_task(struct task_struct *task);
  479. extern void perf_event_delayed_put(struct task_struct *task);
  480. extern void perf_event_print_debug(void);
  481. extern void perf_pmu_disable(struct pmu *pmu);
  482. extern void perf_pmu_enable(struct pmu *pmu);
  483. extern int perf_event_task_disable(void);
  484. extern int perf_event_task_enable(void);
  485. extern int perf_event_refresh(struct perf_event *event, int refresh);
  486. extern void perf_event_update_userpage(struct perf_event *event);
  487. extern int perf_event_release_kernel(struct perf_event *event);
  488. extern struct perf_event *
  489. perf_event_create_kernel_counter(struct perf_event_attr *attr,
  490. int cpu,
  491. struct task_struct *task,
  492. perf_overflow_handler_t callback,
  493. void *context);
  494. extern void perf_pmu_migrate_context(struct pmu *pmu,
  495. int src_cpu, int dst_cpu);
  496. extern u64 perf_event_read_value(struct perf_event *event,
  497. u64 *enabled, u64 *running);
  498. struct perf_sample_data {
  499. /*
  500. * Fields set by perf_sample_data_init(), group so as to
  501. * minimize the cachelines touched.
  502. */
  503. u64 addr;
  504. struct perf_raw_record *raw;
  505. struct perf_branch_stack *br_stack;
  506. u64 period;
  507. u64 weight;
  508. u64 txn;
  509. union perf_mem_data_src data_src;
  510. /*
  511. * The other fields, optionally {set,used} by
  512. * perf_{prepare,output}_sample().
  513. */
  514. u64 type;
  515. u64 ip;
  516. struct {
  517. u32 pid;
  518. u32 tid;
  519. } tid_entry;
  520. u64 time;
  521. u64 id;
  522. u64 stream_id;
  523. struct {
  524. u32 cpu;
  525. u32 reserved;
  526. } cpu_entry;
  527. struct perf_callchain_entry *callchain;
  528. /*
  529. * regs_user may point to task_pt_regs or to regs_user_copy, depending
  530. * on arch details.
  531. */
  532. struct perf_regs regs_user;
  533. struct pt_regs regs_user_copy;
  534. struct perf_regs regs_intr;
  535. u64 stack_user_size;
  536. } ____cacheline_aligned;
  537. /* default value for data source */
  538. #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
  539. PERF_MEM_S(LVL, NA) |\
  540. PERF_MEM_S(SNOOP, NA) |\
  541. PERF_MEM_S(LOCK, NA) |\
  542. PERF_MEM_S(TLB, NA))
  543. static inline void perf_sample_data_init(struct perf_sample_data *data,
  544. u64 addr, u64 period)
  545. {
  546. /* remaining struct members initialized in perf_prepare_sample() */
  547. data->addr = addr;
  548. data->raw = NULL;
  549. data->br_stack = NULL;
  550. data->period = period;
  551. data->weight = 0;
  552. data->data_src.val = PERF_MEM_NA;
  553. data->txn = 0;
  554. }
  555. extern void perf_output_sample(struct perf_output_handle *handle,
  556. struct perf_event_header *header,
  557. struct perf_sample_data *data,
  558. struct perf_event *event);
  559. extern void perf_prepare_sample(struct perf_event_header *header,
  560. struct perf_sample_data *data,
  561. struct perf_event *event,
  562. struct pt_regs *regs);
  563. extern int perf_event_overflow(struct perf_event *event,
  564. struct perf_sample_data *data,
  565. struct pt_regs *regs);
  566. static inline bool is_sampling_event(struct perf_event *event)
  567. {
  568. return event->attr.sample_period != 0;
  569. }
  570. /*
  571. * Return 1 for a software event, 0 for a hardware event
  572. */
  573. static inline int is_software_event(struct perf_event *event)
  574. {
  575. return event->pmu->task_ctx_nr == perf_sw_context;
  576. }
  577. extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
  578. extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
  579. extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
  580. #ifndef perf_arch_fetch_caller_regs
  581. static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
  582. #endif
  583. /*
  584. * Take a snapshot of the regs. Skip ip and frame pointer to
  585. * the nth caller. We only need a few of the regs:
  586. * - ip for PERF_SAMPLE_IP
  587. * - cs for user_mode() tests
  588. * - bp for callchains
  589. * - eflags, for future purposes, just in case
  590. */
  591. static inline void perf_fetch_caller_regs(struct pt_regs *regs)
  592. {
  593. memset(regs, 0, sizeof(*regs));
  594. perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
  595. }
  596. static __always_inline void
  597. perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
  598. {
  599. if (static_key_false(&perf_swevent_enabled[event_id]))
  600. __perf_sw_event(event_id, nr, regs, addr);
  601. }
  602. DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
  603. /*
  604. * 'Special' version for the scheduler, it hard assumes no recursion,
  605. * which is guaranteed by us not actually scheduling inside other swevents
  606. * because those disable preemption.
  607. */
  608. static __always_inline void
  609. perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
  610. {
  611. if (static_key_false(&perf_swevent_enabled[event_id])) {
  612. struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
  613. perf_fetch_caller_regs(regs);
  614. ___perf_sw_event(event_id, nr, regs, addr);
  615. }
  616. }
  617. extern struct static_key_deferred perf_sched_events;
  618. static inline void perf_event_task_sched_in(struct task_struct *prev,
  619. struct task_struct *task)
  620. {
  621. if (static_key_false(&perf_sched_events.key))
  622. __perf_event_task_sched_in(prev, task);
  623. }
  624. static inline void perf_event_task_sched_out(struct task_struct *prev,
  625. struct task_struct *next)
  626. {
  627. perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
  628. if (static_key_false(&perf_sched_events.key))
  629. __perf_event_task_sched_out(prev, next);
  630. }
  631. extern void perf_event_mmap(struct vm_area_struct *vma);
  632. extern struct perf_guest_info_callbacks *perf_guest_cbs;
  633. extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
  634. extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
  635. extern void perf_event_exec(void);
  636. extern void perf_event_comm(struct task_struct *tsk, bool exec);
  637. extern void perf_event_fork(struct task_struct *tsk);
  638. /* Callchains */
  639. DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
  640. extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
  641. extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
  642. static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
  643. {
  644. if (entry->nr < PERF_MAX_STACK_DEPTH)
  645. entry->ip[entry->nr++] = ip;
  646. }
  647. extern int sysctl_perf_event_paranoid;
  648. extern int sysctl_perf_event_mlock;
  649. extern int sysctl_perf_event_sample_rate;
  650. extern int sysctl_perf_cpu_time_max_percent;
  651. extern void perf_sample_event_took(u64 sample_len_ns);
  652. extern int perf_proc_update_handler(struct ctl_table *table, int write,
  653. void __user *buffer, size_t *lenp,
  654. loff_t *ppos);
  655. extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
  656. void __user *buffer, size_t *lenp,
  657. loff_t *ppos);
  658. static inline bool perf_paranoid_tracepoint_raw(void)
  659. {
  660. return sysctl_perf_event_paranoid > -1;
  661. }
  662. static inline bool perf_paranoid_cpu(void)
  663. {
  664. return sysctl_perf_event_paranoid > 0;
  665. }
  666. static inline bool perf_paranoid_kernel(void)
  667. {
  668. return sysctl_perf_event_paranoid > 1;
  669. }
  670. extern void perf_event_init(void);
  671. extern void perf_tp_event(u64 addr, u64 count, void *record,
  672. int entry_size, struct pt_regs *regs,
  673. struct hlist_head *head, int rctx,
  674. struct task_struct *task);
  675. extern void perf_bp_event(struct perf_event *event, void *data);
  676. #ifndef perf_misc_flags
  677. # define perf_misc_flags(regs) \
  678. (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
  679. # define perf_instruction_pointer(regs) instruction_pointer(regs)
  680. #endif
  681. static inline bool has_branch_stack(struct perf_event *event)
  682. {
  683. return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
  684. }
  685. extern int perf_output_begin(struct perf_output_handle *handle,
  686. struct perf_event *event, unsigned int size);
  687. extern void perf_output_end(struct perf_output_handle *handle);
  688. extern unsigned int perf_output_copy(struct perf_output_handle *handle,
  689. const void *buf, unsigned int len);
  690. extern unsigned int perf_output_skip(struct perf_output_handle *handle,
  691. unsigned int len);
  692. extern int perf_swevent_get_recursion_context(void);
  693. extern void perf_swevent_put_recursion_context(int rctx);
  694. extern u64 perf_swevent_set_period(struct perf_event *event);
  695. extern void perf_event_enable(struct perf_event *event);
  696. extern void perf_event_disable(struct perf_event *event);
  697. extern int __perf_event_disable(void *info);
  698. extern void perf_event_task_tick(void);
  699. #else /* !CONFIG_PERF_EVENTS: */
  700. static inline void
  701. perf_event_task_sched_in(struct task_struct *prev,
  702. struct task_struct *task) { }
  703. static inline void
  704. perf_event_task_sched_out(struct task_struct *prev,
  705. struct task_struct *next) { }
  706. static inline int perf_event_init_task(struct task_struct *child) { return 0; }
  707. static inline void perf_event_exit_task(struct task_struct *child) { }
  708. static inline void perf_event_free_task(struct task_struct *task) { }
  709. static inline void perf_event_delayed_put(struct task_struct *task) { }
  710. static inline void perf_event_print_debug(void) { }
  711. static inline int perf_event_task_disable(void) { return -EINVAL; }
  712. static inline int perf_event_task_enable(void) { return -EINVAL; }
  713. static inline int perf_event_refresh(struct perf_event *event, int refresh)
  714. {
  715. return -EINVAL;
  716. }
  717. static inline void
  718. perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
  719. static inline void
  720. perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
  721. static inline void
  722. perf_bp_event(struct perf_event *event, void *data) { }
  723. static inline int perf_register_guest_info_callbacks
  724. (struct perf_guest_info_callbacks *callbacks) { return 0; }
  725. static inline int perf_unregister_guest_info_callbacks
  726. (struct perf_guest_info_callbacks *callbacks) { return 0; }
  727. static inline void perf_event_mmap(struct vm_area_struct *vma) { }
  728. static inline void perf_event_exec(void) { }
  729. static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
  730. static inline void perf_event_fork(struct task_struct *tsk) { }
  731. static inline void perf_event_init(void) { }
  732. static inline int perf_swevent_get_recursion_context(void) { return -1; }
  733. static inline void perf_swevent_put_recursion_context(int rctx) { }
  734. static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
  735. static inline void perf_event_enable(struct perf_event *event) { }
  736. static inline void perf_event_disable(struct perf_event *event) { }
  737. static inline int __perf_event_disable(void *info) { return -1; }
  738. static inline void perf_event_task_tick(void) { }
  739. #endif
  740. #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
  741. extern bool perf_event_can_stop_tick(void);
  742. #else
  743. static inline bool perf_event_can_stop_tick(void) { return true; }
  744. #endif
  745. #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
  746. extern void perf_restore_debug_store(void);
  747. #else
  748. static inline void perf_restore_debug_store(void) { }
  749. #endif
  750. #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
  751. /*
  752. * This has to have a higher priority than migration_notifier in sched/core.c.
  753. */
  754. #define perf_cpu_notifier(fn) \
  755. do { \
  756. static struct notifier_block fn##_nb = \
  757. { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
  758. unsigned long cpu = smp_processor_id(); \
  759. unsigned long flags; \
  760. \
  761. cpu_notifier_register_begin(); \
  762. fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
  763. (void *)(unsigned long)cpu); \
  764. local_irq_save(flags); \
  765. fn(&fn##_nb, (unsigned long)CPU_STARTING, \
  766. (void *)(unsigned long)cpu); \
  767. local_irq_restore(flags); \
  768. fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
  769. (void *)(unsigned long)cpu); \
  770. __register_cpu_notifier(&fn##_nb); \
  771. cpu_notifier_register_done(); \
  772. } while (0)
  773. /*
  774. * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
  775. * callback for already online CPUs.
  776. */
  777. #define __perf_cpu_notifier(fn) \
  778. do { \
  779. static struct notifier_block fn##_nb = \
  780. { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
  781. \
  782. __register_cpu_notifier(&fn##_nb); \
  783. } while (0)
  784. struct perf_pmu_events_attr {
  785. struct device_attribute attr;
  786. u64 id;
  787. const char *event_str;
  788. };
  789. ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
  790. char *page);
  791. #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
  792. static struct perf_pmu_events_attr _var = { \
  793. .attr = __ATTR(_name, 0444, _show, NULL), \
  794. .id = _id, \
  795. };
  796. #define PMU_EVENT_ATTR_STRING(_name, _var, _str) \
  797. static struct perf_pmu_events_attr _var = { \
  798. .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
  799. .id = 0, \
  800. .event_str = _str, \
  801. };
  802. #define PMU_FORMAT_ATTR(_name, _format) \
  803. static ssize_t \
  804. _name##_show(struct device *dev, \
  805. struct device_attribute *attr, \
  806. char *page) \
  807. { \
  808. BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
  809. return sprintf(page, _format "\n"); \
  810. } \
  811. \
  812. static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
  813. #endif /* _LINUX_PERF_EVENT_H */