perf_event.h
/*
 * Performance events:
 *
 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int (*is_in_guest)(void);
	int (*is_user_mode)(void);
	unsigned long (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64 nr;
	__u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry *entry;
	u32 max_stack;
	u32 nr;
	short contexts;
	bool contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag *next;
		unsigned long pad;
	};
	perf_copy_f copy;
	void *data;
	u32 size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag frag;
	u32 size;
};

/*
 * branch stack layout:
 * nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64 nr;
	struct perf_branch_entry entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64 config;		/* register value */
	unsigned int reg;	/* register address or index */
	int alloc;		/* extra register already allocated */
	int idx;		/* index in shared_regs->regs[] */
};
/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64 config;
			u64 last_tag;
			unsigned long config_base;
			unsigned long event_base;
			int event_base_rdpmc;
			int idx;
			int last_cpu;
			int flags;
			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head tp_list;
		};
		struct { /* intel_cqm */
			int cqm_state;
			u32 cqm_rmid;
			int is_group_event;
			struct list_head cqm_events_entry;
			struct list_head cqm_groups_entry;
			struct list_head cqm_group_entry;
		};
		struct { /* itrace */
			int itrace_started;
		};
		struct { /* amd_power */
			u64 pwr_acc;
			u64 ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint info;
			struct list_head bp_list;
		};
#endif
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct *target;
	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void *addr_filters;
	/* Last sync'ed generation of filters */
	unsigned long addr_filters_gen;

	/*
	 * hw_perf_event::state flags; used to track the PERF_EF_* state.
	 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04
	int state;
	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t prev_count;
	/*
	 * The period to start the next sample with.
	 */
	u64 sample_period;
	/*
	 * The period we started this sample with.
	 */
	u64 last_period;
	/*
	 * However much is left of the current period; note that this is
	 * a full 64bit value and allows for generation of periods longer
	 * than hardware might allow.
	 */
	local64_t period_left;
	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64 interrupts_seq;
	u64 interrupts;
	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64 freq_time_stamp;
	u64 freq_count_stamp;
#endif
};
struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1 /* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2 /* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT	0x01
#define PERF_PMU_CAP_NO_NMI		0x02
#define PERF_PMU_CAP_AUX_NO_SG		0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF	0x08
#define PERF_PMU_CAP_EXCLUSIVE		0x10
#define PERF_PMU_CAP_ITRACE		0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS	0x40

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head entry;
	struct module *module;
	struct device *dev;
	const struct attribute_group **attr_groups;
	const char *name;
	int type;
	/*
	 * various common per-pmu feature flags
	 */
	int capabilities;
	int * __percpu pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int task_ctx_nr;
	int hrtimer_interval_ms;
	/* number of address filters this PMU can do */
	unsigned int nr_addr_filters;
	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable) (struct pmu *pmu); /* optional */
	void (*pmu_disable) (struct pmu *pmu); /* optional */
	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init) (struct perf_event *event);
	/*
	 * Notification that the event was mapped or unmapped. Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped) (struct perf_event *event); /* optional */
	void (*event_unmapped) (struct perf_event *event); /* optional */
	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01 /* start the counter when adding */
#define PERF_EF_RELOAD	0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE	0x04 /* update the counter when stopping */
	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 * as ->add() followed by ->stop().
	 *
	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
	 * ->stop() that must deal with already being stopped without
	 * PERF_EF_UPDATE.
	 */
	int (*add) (struct perf_event *event, int flags);
	void (*del) (struct perf_event *event, int flags);
	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 * period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 * value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start) (struct perf_event *event, int flags);
	void (*stop) (struct perf_event *event, int flags);
	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read) (struct perf_event *event);
	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn) (struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int (*commit_txn) (struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn) (struct pmu *pmu);
	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx) (struct perf_event *event); /* optional */
	/*
	 * context-switches callback
	 */
	void (*sched_task) (struct perf_event_context *ctx,
			    bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t task_ctx_size;
	/*
	 * Return the count value for a counter.
	 */
	u64 (*count) (struct perf_event *event); /* optional */
	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux) (int cpu, void **pages,
			    int nr_pages, bool overwrite);
	/* optional */
	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux) (void *aux); /* optional */
	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate) (struct list_head *filters);
	/* optional */
	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync) (struct perf_event *event);
	/* optional */
	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match) (struct perf_event *event); /* optional */
};
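
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * a minimal PMU wired up through the callbacks above and registered with
 * perf_pmu_register(). The my_* callbacks are hypothetical placeholders a
 * driver would implement; ->event_init() should return -ENOENT for events
 * that do not belong to this PMU, and passing -1 as the type asks the core
 * to allocate a dynamic PMU type id.
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 */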
/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @inode:	object file's inode for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size
 * @range:	1: range, 0: address
 * @filter:	1: filter/start, 0: stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head entry;
	struct inode *inode;
	unsigned long offset;
	unsigned long size;
	unsigned int range : 1,
		     filter : 1;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head list;
	raw_spinlock_t lock;
	unsigned int nr_file_filters;
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	= 0,
	PERF_EVENT_STATE_ACTIVE		= 1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and group caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)

#define SWEVENT_HLIST_BITS	8
#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head heads[SWEVENT_HLIST_SIZE];
	struct rcu_head rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

struct pmu_event_list {
	raw_spinlock_t lock;
	struct list_head list;
};

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head event_entry;
	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling on a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head group_entry;
	struct list_head sibling_list;
	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact which avoids us using the other two entries.
	 */
	struct list_head migrate_entry;
	struct hlist_node hlist_entry;
	struct list_head active_entry;
	int nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int group_caps;

	struct perf_event *group_leader;
	struct pmu *pmu;
	void *pmu_private;

	enum perf_event_active_state state;
	unsigned int attach_state;
	local64_t count;
	atomic64_t child_count;
	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64 total_time_enabled;
	u64 total_time_running;
	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64 tstamp_enabled;
	u64 tstamp_running;
	u64 tstamp_stopped;
	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64 shadow_ctx_time;

	struct perf_event_attr attr;
	u16 header_size;
	u16 id_header_size;
	u16 read_size;
	struct hw_perf_event hw;

	struct perf_event_context *ctx;
	atomic_long_t refcount;
	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t child_total_time_enabled;
	atomic64_t child_total_time_running;
	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex child_mutex;
	struct list_head child_list;
	struct perf_event *parent;

	int oncpu;
	int cpu;

	struct list_head owner_entry;
	struct task_struct *owner;

	/* mmap bits */
	struct mutex mmap_mutex;
	atomic_t mmap_count;
	struct ring_buffer *rb;
	struct list_head rb_entry;
	unsigned long rcu_batches;
	int rcu_pending;

	/* poll related */
	wait_queue_head_t waitq;
	struct fasync_struct *fasync;

	/* delayed work for NMIs and such */
	int pending_wakeup;
	int pending_kill;
	int pending_disable;
	struct irq_work pending;
	atomic_t event_limit;

	/* address range filters */
	struct perf_addr_filters_head addr_filters;
	/* vma address array for file-based filters */
	unsigned long *addr_filters_offs;
	unsigned long addr_filters_gen;

	void (*destroy)(struct perf_event *);
	struct rcu_head rcu_head;
	struct pid_namespace *ns;
	u64 id;
	u64 (*clock)(void);
	perf_overflow_handler_t overflow_handler;
	void *overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t orig_overflow_handler;
	struct bpf_prog *prog;
#endif
#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call *tp_event;
	struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops ftrace_ops;
#endif
#endif
#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup *cgrp; /* cgroup event is attached to */
	int cgrp_defer_enabled;
#endif
	struct list_head sb_list;
#endif /* CONFIG_PERF_EVENTS */
};
/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu *pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex mutex;

	struct list_head active_ctx_list;
	struct list_head pinned_groups;
	struct list_head flexible_groups;
	struct list_head event_list;
	int nr_events;
	int nr_active;
	int is_active;
	int nr_stat;
	int nr_freq;
	int rotate_disable;
	atomic_t refcount;
	struct task_struct *task;
	/*
	 * Context clock, runs when context enabled.
	 */
	u64 time;
	u64 timestamp;
	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context *parent_ctx;
	u64 parent_gen;
	u64 generation;
	int pin_count;
#ifdef CONFIG_CGROUP_PERF
	int nr_cgroups; /* cgroup evts */
#endif
	void *task_ctx_data; /* pmu specific data */
	struct rcu_head rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context ctx;
	struct perf_event_context *task_ctx;
	int active_oncpu;
	int exclusive;

	raw_spinlock_t hrtimer_lock;
	struct hrtimer hrtimer;
	ktime_t hrtimer_interval;
	unsigned int hrtimer_active;
#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup *cgrp;
	struct list_head cgrp_cpuctx_entry;
#endif
	struct list_head sched_cb_entry;
	int sched_cb_usage;
};

struct perf_output_handle {
	struct perf_event *event;
	struct ring_buffer *rb;
	unsigned long wakeup;
	unsigned long size;
	union {
		void *addr;
		unsigned long head;
	};
	int page;
};

struct bpf_perf_event_data_kern {
	struct pt_regs *regs;
	struct perf_sample_data *data;
};
#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64 time;
	u64 timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state css;
	struct perf_cgroup_info __percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
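
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * the in-kernel counter API above used to count CPU cycles on one CPU and
 * read the value back. Error handling is abbreviated and the surrounding
 * driver code is hypothetical.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *		.pinned	= 1,
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	pr_info("cycles: %llu\n",
 *		perf_event_read_value(event, &enabled, &running));
 *	perf_event_release_kernel(event);
 */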
struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64 addr;
	struct perf_raw_record *raw;
	struct perf_branch_stack *br_stack;
	u64 period;
	u64 weight;
	u64 txn;
	union perf_mem_data_src data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64 type;
	u64 ip;
	struct {
		u32 pid;
		u32 tid;
	} tid_entry;
	u64 time;
	u64 id;
	u64 stream_id;
	struct {
		u32 cpu;
		u32 reserved;
	} cpu_entry;
	struct perf_callchain_entry *callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs regs_user;
	struct pt_regs regs_user_copy;

	struct perf_regs regs_intr;
	u64 stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)  |\
		     PERF_MEM_S(SNOOP, NA)|\
		     PERF_MEM_S(LOCK, NA) |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}
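
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * how a PMU interrupt handler typically uses perf_sample_data_init() together
 * with perf_event_overflow() (declared below). my_pmu_handle_irq() and the
 * way the overflowed event and pt_regs are obtained are hypothetical.
 *
 *	static void my_pmu_handle_irq(struct perf_event *event,
 *				      struct pt_regs *regs)
 *	{
 *		struct perf_sample_data data;
 *
 *		perf_sample_data_init(&data, 0, event->hw.last_period);
 *		if (perf_event_overflow(event, &data, regs))
 *			event->pmu->stop(event, 0);
 *	}
 */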
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);
extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
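
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * emitting a software event from a fault path, in the style of the
 * architecture page-fault handlers. The surrounding handler and its
 * 'regs'/'address' variables are hypothetical here.
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */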
DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}
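
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * an architecture's perf_callchain_kernel() implementation records one entry
 * per frame with perf_callchain_store() and stops once it returns non-zero.
 * The frame-walking loop below is pseudocode standing in for the arch
 * unwinder.
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long addr;
 *
 *		for (each kernel return address 'addr', innermost first) {
 *			if (perf_callchain_store(entry, addr))
 *				break;
 *		}
 *	}
 */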
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
					     void __user *buffer, size_t *lenp,
					     loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
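
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * emitting a record through the output helpers above. The record payload and
 * its length are hypothetical; perf_output_put() is the helper macro defined
 * near the end of this header.
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,
 *		.size = sizeof(header) + payload_len,
 *	};
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, payload, payload_len);
 *	perf_output_end(&handle);
 */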
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event) { return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
		    bool truncated) { }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size) { return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle) { return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task) { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void) { }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str_ht;
	const char *event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   = _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
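
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * how a PMU driver typically uses the attribute macros above to describe its
 * config format and a named event in sysfs. The group name, variable names
 * and the "event=0x3c" encoding are hypothetical.
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cpu-cycles, my_attr_cycles, "event=0x3c");
 *
 *	static struct attribute *my_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *	static const struct attribute_group my_format_group = {
 *		.name  = "format",
 *		.attrs = my_format_attrs,
 *	};
 */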
/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

#endif /* _LINUX_PERF_EVENT_H */