perf_event.h 39 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380
  1. /*
  2. * Performance events:
  3. *
  4. * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  7. *
  8. * Data type definitions, declarations, prototypes.
  9. *
  10. * Started by: Thomas Gleixner and Ingo Molnar
  11. *
  12. * For licencing details see kernel-base/COPYING
  13. */
  14. #ifndef _LINUX_PERF_EVENT_H
  15. #define _LINUX_PERF_EVENT_H
  16. #include <uapi/linux/perf_event.h>
  17. /*
  18. * Kernel-internal data types and definitions:
  19. */
  20. #ifdef CONFIG_PERF_EVENTS
  21. # include <asm/perf_event.h>
  22. # include <asm/local64.h>
  23. #endif
  24. struct perf_guest_info_callbacks {
  25. int (*is_in_guest)(void);
  26. int (*is_user_mode)(void);
  27. unsigned long (*get_guest_ip)(void);
  28. };
  29. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  30. #include <asm/hw_breakpoint.h>
  31. #endif
  32. #include <linux/list.h>
  33. #include <linux/mutex.h>
  34. #include <linux/rculist.h>
  35. #include <linux/rcupdate.h>
  36. #include <linux/spinlock.h>
  37. #include <linux/hrtimer.h>
  38. #include <linux/fs.h>
  39. #include <linux/pid_namespace.h>
  40. #include <linux/workqueue.h>
  41. #include <linux/ftrace.h>
  42. #include <linux/cpu.h>
  43. #include <linux/irq_work.h>
  44. #include <linux/static_key.h>
  45. #include <linux/jump_label_ratelimit.h>
  46. #include <linux/atomic.h>
  47. #include <linux/sysfs.h>
  48. #include <linux/perf_regs.h>
  49. #include <linux/workqueue.h>
  50. #include <linux/cgroup.h>
  51. #include <asm/local.h>
  52. struct perf_callchain_entry {
  53. __u64 nr;
  54. __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
  55. };
  56. struct perf_callchain_entry_ctx {
  57. struct perf_callchain_entry *entry;
  58. u32 max_stack;
  59. u32 nr;
  60. short contexts;
  61. bool contexts_maxed;
  62. };
  63. typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
  64. unsigned long off, unsigned long len);
  65. struct perf_raw_frag {
  66. union {
  67. struct perf_raw_frag *next;
  68. unsigned long pad;
  69. };
  70. perf_copy_f copy;
  71. void *data;
  72. u32 size;
  73. } __packed;
  74. struct perf_raw_record {
  75. struct perf_raw_frag frag;
  76. u32 size;
  77. };
  78. /*
  79. * branch stack layout:
  80. * nr: number of taken branches stored in entries[]
  81. *
  82. * Note that nr can vary from sample to sample
  83. * branches (to, from) are stored from most recent
  84. * to least recent, i.e., entries[0] contains the most
  85. * recent branch.
  86. */
  87. struct perf_branch_stack {
  88. __u64 nr;
  89. struct perf_branch_entry entries[0];
  90. };
  91. struct task_struct;
  92. /*
  93. * extra PMU register associated with an event
  94. */
  95. struct hw_perf_event_extra {
  96. u64 config; /* register value */
  97. unsigned int reg; /* register address or index */
  98. int alloc; /* extra register already allocated */
  99. int idx; /* index in shared_regs->regs[] */
  100. };
  101. /**
  102. * struct hw_perf_event - performance event hardware details:
  103. */
  104. struct hw_perf_event {
  105. #ifdef CONFIG_PERF_EVENTS
  106. union {
  107. struct { /* hardware */
  108. u64 config;
  109. u64 last_tag;
  110. unsigned long config_base;
  111. unsigned long event_base;
  112. int event_base_rdpmc;
  113. int idx;
  114. int last_cpu;
  115. int flags;
  116. struct hw_perf_event_extra extra_reg;
  117. struct hw_perf_event_extra branch_reg;
  118. };
  119. struct { /* software */
  120. struct hrtimer hrtimer;
  121. };
  122. struct { /* tracepoint */
  123. /* for tp_event->class */
  124. struct list_head tp_list;
  125. };
  126. struct { /* intel_cqm */
  127. int cqm_state;
  128. u32 cqm_rmid;
  129. int is_group_event;
  130. struct list_head cqm_events_entry;
  131. struct list_head cqm_groups_entry;
  132. struct list_head cqm_group_entry;
  133. };
  134. struct { /* itrace */
  135. int itrace_started;
  136. };
  137. struct { /* amd_power */
  138. u64 pwr_acc;
  139. u64 ptsc;
  140. };
  141. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  142. struct { /* breakpoint */
  143. /*
  144. * Crufty hack to avoid the chicken and egg
  145. * problem hw_breakpoint has with context
  146. * creation and event initalization.
  147. */
  148. struct arch_hw_breakpoint info;
  149. struct list_head bp_list;
  150. };
  151. #endif
  152. };
  153. /*
  154. * If the event is a per task event, this will point to the task in
  155. * question. See the comment in perf_event_alloc().
  156. */
  157. struct task_struct *target;
  158. /*
  159. * PMU would store hardware filter configuration
  160. * here.
  161. */
  162. void *addr_filters;
  163. /* Last sync'ed generation of filters */
  164. unsigned long addr_filters_gen;
  165. /*
  166. * hw_perf_event::state flags; used to track the PERF_EF_* state.
  167. */
  168. #define PERF_HES_STOPPED 0x01 /* the counter is stopped */
  169. #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
  170. #define PERF_HES_ARCH 0x04
  171. int state;
  172. /*
  173. * The last observed hardware counter value, updated with a
  174. * local64_cmpxchg() such that pmu::read() can be called nested.
  175. */
  176. local64_t prev_count;
  177. /*
  178. * The period to start the next sample with.
  179. */
  180. u64 sample_period;
  181. /*
  182. * The period we started this sample with.
  183. */
  184. u64 last_period;
  185. /*
  186. * However much is left of the current period; note that this is
  187. * a full 64bit value and allows for generation of periods longer
  188. * than hardware might allow.
  189. */
  190. local64_t period_left;
  191. /*
  192. * State for throttling the event, see __perf_event_overflow() and
  193. * perf_adjust_freq_unthr_context().
  194. */
  195. u64 interrupts_seq;
  196. u64 interrupts;
  197. /*
  198. * State for freq target events, see __perf_event_overflow() and
  199. * perf_adjust_freq_unthr_context().
  200. */
  201. u64 freq_time_stamp;
  202. u64 freq_count_stamp;
  203. #endif
  204. };
  205. struct perf_event;
  206. /*
  207. * Common implementation detail of pmu::{start,commit,cancel}_txn
  208. */
  209. #define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
  210. #define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
  211. /**
  212. * pmu::capabilities flags
  213. */
  214. #define PERF_PMU_CAP_NO_INTERRUPT 0x01
  215. #define PERF_PMU_CAP_NO_NMI 0x02
  216. #define PERF_PMU_CAP_AUX_NO_SG 0x04
  217. #define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
  218. #define PERF_PMU_CAP_EXCLUSIVE 0x10
  219. #define PERF_PMU_CAP_ITRACE 0x20
  220. #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
  221. /**
  222. * struct pmu - generic performance monitoring unit
  223. */
  224. struct pmu {
  225. struct list_head entry;
  226. struct module *module;
  227. struct device *dev;
  228. const struct attribute_group **attr_groups;
  229. const char *name;
  230. int type;
  231. /*
  232. * various common per-pmu feature flags
  233. */
  234. int capabilities;
  235. int * __percpu pmu_disable_count;
  236. struct perf_cpu_context * __percpu pmu_cpu_context;
  237. atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
  238. int task_ctx_nr;
  239. int hrtimer_interval_ms;
  240. /* number of address filters this PMU can do */
  241. unsigned int nr_addr_filters;
  242. /*
  243. * Fully disable/enable this PMU, can be used to protect from the PMI
  244. * as well as for lazy/batch writing of the MSRs.
  245. */
  246. void (*pmu_enable) (struct pmu *pmu); /* optional */
  247. void (*pmu_disable) (struct pmu *pmu); /* optional */
  248. /*
  249. * Try and initialize the event for this PMU.
  250. *
  251. * Returns:
  252. * -ENOENT -- @event is not for this PMU
  253. *
  254. * -ENODEV -- @event is for this PMU but PMU not present
  255. * -EBUSY -- @event is for this PMU but PMU temporarily unavailable
  256. * -EINVAL -- @event is for this PMU but @event is not valid
  257. * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
  258. * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges
  259. *
  260. * 0 -- @event is for this PMU and valid
  261. *
  262. * Other error return values are allowed.
  263. */
  264. int (*event_init) (struct perf_event *event);
  265. /*
  266. * Notification that the event was mapped or unmapped. Called
  267. * in the context of the mapping task.
  268. */
  269. void (*event_mapped) (struct perf_event *event); /*optional*/
  270. void (*event_unmapped) (struct perf_event *event); /*optional*/
  271. /*
  272. * Flags for ->add()/->del()/ ->start()/->stop(). There are
  273. * matching hw_perf_event::state flags.
  274. */
  275. #define PERF_EF_START 0x01 /* start the counter when adding */
  276. #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
  277. #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
  278. /*
  279. * Adds/Removes a counter to/from the PMU, can be done inside a
  280. * transaction, see the ->*_txn() methods.
  281. *
  282. * The add/del callbacks will reserve all hardware resources required
  283. * to service the event, this includes any counter constraint
  284. * scheduling etc.
  285. *
  286. * Called with IRQs disabled and the PMU disabled on the CPU the event
  287. * is on.
  288. *
  289. * ->add() called without PERF_EF_START should result in the same state
  290. * as ->add() followed by ->stop().
  291. *
  292. * ->del() must always PERF_EF_UPDATE stop an event. If it calls
  293. * ->stop() that must deal with already being stopped without
  294. * PERF_EF_UPDATE.
  295. */
  296. int (*add) (struct perf_event *event, int flags);
  297. void (*del) (struct perf_event *event, int flags);
  298. /*
  299. * Starts/Stops a counter present on the PMU.
  300. *
  301. * The PMI handler should stop the counter when perf_event_overflow()
  302. * returns !0. ->start() will be used to continue.
  303. *
  304. * Also used to change the sample period.
  305. *
  306. * Called with IRQs disabled and the PMU disabled on the CPU the event
  307. * is on -- will be called from NMI context with the PMU generates
  308. * NMIs.
  309. *
  310. * ->stop() with PERF_EF_UPDATE will read the counter and update
  311. * period/count values like ->read() would.
  312. *
  313. * ->start() with PERF_EF_RELOAD will reprogram the the counter
  314. * value, must be preceded by a ->stop() with PERF_EF_UPDATE.
  315. */
  316. void (*start) (struct perf_event *event, int flags);
  317. void (*stop) (struct perf_event *event, int flags);
  318. /*
  319. * Updates the counter value of the event.
  320. *
  321. * For sampling capable PMUs this will also update the software period
  322. * hw_perf_event::period_left field.
  323. */
  324. void (*read) (struct perf_event *event);
  325. /*
  326. * Group events scheduling is treated as a transaction, add
  327. * group events as a whole and perform one schedulability test.
  328. * If the test fails, roll back the whole group
  329. *
  330. * Start the transaction, after this ->add() doesn't need to
  331. * do schedulability tests.
  332. *
  333. * Optional.
  334. */
  335. void (*start_txn) (struct pmu *pmu, unsigned int txn_flags);
  336. /*
  337. * If ->start_txn() disabled the ->add() schedulability test
  338. * then ->commit_txn() is required to perform one. On success
  339. * the transaction is closed. On error the transaction is kept
  340. * open until ->cancel_txn() is called.
  341. *
  342. * Optional.
  343. */
  344. int (*commit_txn) (struct pmu *pmu);
  345. /*
  346. * Will cancel the transaction, assumes ->del() is called
  347. * for each successful ->add() during the transaction.
  348. *
  349. * Optional.
  350. */
  351. void (*cancel_txn) (struct pmu *pmu);
  352. /*
  353. * Will return the value for perf_event_mmap_page::index for this event,
  354. * if no implementation is provided it will default to: event->hw.idx + 1.
  355. */
  356. int (*event_idx) (struct perf_event *event); /*optional */
  357. /*
  358. * context-switches callback
  359. */
  360. void (*sched_task) (struct perf_event_context *ctx,
  361. bool sched_in);
  362. /*
  363. * PMU specific data size
  364. */
  365. size_t task_ctx_size;
  366. /*
  367. * Return the count value for a counter.
  368. */
  369. u64 (*count) (struct perf_event *event); /*optional*/
  370. /*
  371. * Set up pmu-private data structures for an AUX area
  372. */
  373. void *(*setup_aux) (int cpu, void **pages,
  374. int nr_pages, bool overwrite);
  375. /* optional */
  376. /*
  377. * Free pmu-private AUX data structures
  378. */
  379. void (*free_aux) (void *aux); /* optional */
  380. /*
  381. * Validate address range filters: make sure the HW supports the
  382. * requested configuration and number of filters; return 0 if the
  383. * supplied filters are valid, -errno otherwise.
  384. *
  385. * Runs in the context of the ioctl()ing process and is not serialized
  386. * with the rest of the PMU callbacks.
  387. */
  388. int (*addr_filters_validate) (struct list_head *filters);
  389. /* optional */
  390. /*
  391. * Synchronize address range filter configuration:
  392. * translate hw-agnostic filters into hardware configuration in
  393. * event::hw::addr_filters.
  394. *
  395. * Runs as a part of filter sync sequence that is done in ->start()
  396. * callback by calling perf_event_addr_filters_sync().
  397. *
  398. * May (and should) traverse event::addr_filters::list, for which its
  399. * caller provides necessary serialization.
  400. */
  401. void (*addr_filters_sync) (struct perf_event *event);
  402. /* optional */
  403. /*
  404. * Filter events for PMU-specific reasons.
  405. */
  406. int (*filter_match) (struct perf_event *event); /* optional */
  407. };
  408. /**
  409. * struct perf_addr_filter - address range filter definition
  410. * @entry: event's filter list linkage
  411. * @inode: object file's inode for file-based filters
  412. * @offset: filter range offset
  413. * @size: filter range size
  414. * @range: 1: range, 0: address
  415. * @filter: 1: filter/start, 0: stop
  416. *
  417. * This is a hardware-agnostic filter configuration as specified by the user.
  418. */
  419. struct perf_addr_filter {
  420. struct list_head entry;
  421. struct inode *inode;
  422. unsigned long offset;
  423. unsigned long size;
  424. unsigned int range : 1,
  425. filter : 1;
  426. };
  427. /**
  428. * struct perf_addr_filters_head - container for address range filters
  429. * @list: list of filters for this event
  430. * @lock: spinlock that serializes accesses to the @list and event's
  431. * (and its children's) filter generations.
  432. *
  433. * A child event will use parent's @list (and therefore @lock), so they are
  434. * bundled together; see perf_event_addr_filters().
  435. */
  436. struct perf_addr_filters_head {
  437. struct list_head list;
  438. raw_spinlock_t lock;
  439. };
  440. /**
  441. * enum perf_event_active_state - the states of a event
  442. */
  443. enum perf_event_active_state {
  444. PERF_EVENT_STATE_DEAD = -4,
  445. PERF_EVENT_STATE_EXIT = -3,
  446. PERF_EVENT_STATE_ERROR = -2,
  447. PERF_EVENT_STATE_OFF = -1,
  448. PERF_EVENT_STATE_INACTIVE = 0,
  449. PERF_EVENT_STATE_ACTIVE = 1,
  450. };
  451. struct file;
  452. struct perf_sample_data;
  453. typedef void (*perf_overflow_handler_t)(struct perf_event *,
  454. struct perf_sample_data *,
  455. struct pt_regs *regs);
  456. /*
  457. * Event capabilities. For event_caps and groups caps.
  458. *
  459. * PERF_EV_CAP_SOFTWARE: Is a software event.
  460. * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
  461. * from any CPU in the package where it is active.
  462. */
  463. #define PERF_EV_CAP_SOFTWARE BIT(0)
  464. #define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
  465. #define SWEVENT_HLIST_BITS 8
  466. #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
  467. struct swevent_hlist {
  468. struct hlist_head heads[SWEVENT_HLIST_SIZE];
  469. struct rcu_head rcu_head;
  470. };
  471. #define PERF_ATTACH_CONTEXT 0x01
  472. #define PERF_ATTACH_GROUP 0x02
  473. #define PERF_ATTACH_TASK 0x04
  474. #define PERF_ATTACH_TASK_DATA 0x08
  475. struct perf_cgroup;
  476. struct ring_buffer;
  477. struct pmu_event_list {
  478. raw_spinlock_t lock;
  479. struct list_head list;
  480. };
  481. /**
  482. * struct perf_event - performance event kernel representation:
  483. */
  484. struct perf_event {
  485. #ifdef CONFIG_PERF_EVENTS
  486. /*
  487. * entry onto perf_event_context::event_list;
  488. * modifications require ctx->lock
  489. * RCU safe iterations.
  490. */
  491. struct list_head event_entry;
  492. /*
  493. * XXX: group_entry and sibling_list should be mutually exclusive;
  494. * either you're a sibling on a group, or you're the group leader.
  495. * Rework the code to always use the same list element.
  496. *
  497. * Locked for modification by both ctx->mutex and ctx->lock; holding
  498. * either sufficies for read.
  499. */
  500. struct list_head group_entry;
  501. struct list_head sibling_list;
  502. /*
  503. * We need storage to track the entries in perf_pmu_migrate_context; we
  504. * cannot use the event_entry because of RCU and we want to keep the
  505. * group in tact which avoids us using the other two entries.
  506. */
  507. struct list_head migrate_entry;
  508. struct hlist_node hlist_entry;
  509. struct list_head active_entry;
  510. int nr_siblings;
  511. /* Not serialized. Only written during event initialization. */
  512. int event_caps;
  513. /* The cumulative AND of all event_caps for events in this group. */
  514. int group_caps;
  515. struct perf_event *group_leader;
  516. struct pmu *pmu;
  517. void *pmu_private;
  518. enum perf_event_active_state state;
  519. unsigned int attach_state;
  520. local64_t count;
  521. atomic64_t child_count;
  522. /*
  523. * These are the total time in nanoseconds that the event
  524. * has been enabled (i.e. eligible to run, and the task has
  525. * been scheduled in, if this is a per-task event)
  526. * and running (scheduled onto the CPU), respectively.
  527. *
  528. * They are computed from tstamp_enabled, tstamp_running and
  529. * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
  530. */
  531. u64 total_time_enabled;
  532. u64 total_time_running;
  533. /*
  534. * These are timestamps used for computing total_time_enabled
  535. * and total_time_running when the event is in INACTIVE or
  536. * ACTIVE state, measured in nanoseconds from an arbitrary point
  537. * in time.
  538. * tstamp_enabled: the notional time when the event was enabled
  539. * tstamp_running: the notional time when the event was scheduled on
  540. * tstamp_stopped: in INACTIVE state, the notional time when the
  541. * event was scheduled off.
  542. */
  543. u64 tstamp_enabled;
  544. u64 tstamp_running;
  545. u64 tstamp_stopped;
  546. /*
  547. * timestamp shadows the actual context timing but it can
  548. * be safely used in NMI interrupt context. It reflects the
  549. * context time as it was when the event was last scheduled in.
  550. *
  551. * ctx_time already accounts for ctx->timestamp. Therefore to
  552. * compute ctx_time for a sample, simply add perf_clock().
  553. */
  554. u64 shadow_ctx_time;
  555. struct perf_event_attr attr;
  556. u16 header_size;
  557. u16 id_header_size;
  558. u16 read_size;
  559. struct hw_perf_event hw;
  560. struct perf_event_context *ctx;
  561. atomic_long_t refcount;
  562. /*
  563. * These accumulate total time (in nanoseconds) that children
  564. * events have been enabled and running, respectively.
  565. */
  566. atomic64_t child_total_time_enabled;
  567. atomic64_t child_total_time_running;
  568. /*
  569. * Protect attach/detach and child_list:
  570. */
  571. struct mutex child_mutex;
  572. struct list_head child_list;
  573. struct perf_event *parent;
  574. int oncpu;
  575. int cpu;
  576. struct list_head owner_entry;
  577. struct task_struct *owner;
  578. /* mmap bits */
  579. struct mutex mmap_mutex;
  580. atomic_t mmap_count;
  581. struct ring_buffer *rb;
  582. struct list_head rb_entry;
  583. unsigned long rcu_batches;
  584. int rcu_pending;
  585. /* poll related */
  586. wait_queue_head_t waitq;
  587. struct fasync_struct *fasync;
  588. /* delayed work for NMIs and such */
  589. int pending_wakeup;
  590. int pending_kill;
  591. int pending_disable;
  592. struct irq_work pending;
  593. atomic_t event_limit;
  594. /* address range filters */
  595. struct perf_addr_filters_head addr_filters;
  596. /* vma address array for file-based filders */
  597. unsigned long *addr_filters_offs;
  598. unsigned long addr_filters_gen;
  599. void (*destroy)(struct perf_event *);
  600. struct rcu_head rcu_head;
  601. struct pid_namespace *ns;
  602. u64 id;
  603. u64 (*clock)(void);
  604. perf_overflow_handler_t overflow_handler;
  605. void *overflow_handler_context;
  606. #ifdef CONFIG_EVENT_TRACING
  607. struct trace_event_call *tp_event;
  608. struct event_filter *filter;
  609. #ifdef CONFIG_FUNCTION_TRACER
  610. struct ftrace_ops ftrace_ops;
  611. #endif
  612. #endif
  613. #ifdef CONFIG_CGROUP_PERF
  614. struct perf_cgroup *cgrp; /* cgroup event is attach to */
  615. int cgrp_defer_enabled;
  616. #endif
  617. struct list_head sb_list;
  618. #endif /* CONFIG_PERF_EVENTS */
  619. };
  620. /**
  621. * struct perf_event_context - event context structure
  622. *
  623. * Used as a container for task events and CPU events as well:
  624. */
  625. struct perf_event_context {
  626. struct pmu *pmu;
  627. /*
  628. * Protect the states of the events in the list,
  629. * nr_active, and the list:
  630. */
  631. raw_spinlock_t lock;
  632. /*
  633. * Protect the list of events. Locking either mutex or lock
  634. * is sufficient to ensure the list doesn't change; to change
  635. * the list you need to lock both the mutex and the spinlock.
  636. */
  637. struct mutex mutex;
  638. struct list_head active_ctx_list;
  639. struct list_head pinned_groups;
  640. struct list_head flexible_groups;
  641. struct list_head event_list;
  642. int nr_events;
  643. int nr_active;
  644. int is_active;
  645. int nr_stat;
  646. int nr_freq;
  647. int rotate_disable;
  648. atomic_t refcount;
  649. struct task_struct *task;
  650. /*
  651. * Context clock, runs when context enabled.
  652. */
  653. u64 time;
  654. u64 timestamp;
  655. /*
  656. * These fields let us detect when two contexts have both
  657. * been cloned (inherited) from a common ancestor.
  658. */
  659. struct perf_event_context *parent_ctx;
  660. u64 parent_gen;
  661. u64 generation;
  662. int pin_count;
  663. #ifdef CONFIG_CGROUP_PERF
  664. int nr_cgroups; /* cgroup evts */
  665. #endif
  666. void *task_ctx_data; /* pmu specific data */
  667. struct rcu_head rcu_head;
  668. };
  669. /*
  670. * Number of contexts where an event can trigger:
  671. * task, softirq, hardirq, nmi.
  672. */
  673. #define PERF_NR_CONTEXTS 4
  674. /**
  675. * struct perf_event_cpu_context - per cpu event context structure
  676. */
  677. struct perf_cpu_context {
  678. struct perf_event_context ctx;
  679. struct perf_event_context *task_ctx;
  680. int active_oncpu;
  681. int exclusive;
  682. raw_spinlock_t hrtimer_lock;
  683. struct hrtimer hrtimer;
  684. ktime_t hrtimer_interval;
  685. unsigned int hrtimer_active;
  686. struct pmu *unique_pmu;
  687. #ifdef CONFIG_CGROUP_PERF
  688. struct perf_cgroup *cgrp;
  689. #endif
  690. struct list_head sched_cb_entry;
  691. int sched_cb_usage;
  692. };
  693. struct perf_output_handle {
  694. struct perf_event *event;
  695. struct ring_buffer *rb;
  696. unsigned long wakeup;
  697. unsigned long size;
  698. union {
  699. void *addr;
  700. unsigned long head;
  701. };
  702. int page;
  703. };
  704. #ifdef CONFIG_CGROUP_PERF
  705. /*
  706. * perf_cgroup_info keeps track of time_enabled for a cgroup.
  707. * This is a per-cpu dynamically allocated data structure.
  708. */
  709. struct perf_cgroup_info {
  710. u64 time;
  711. u64 timestamp;
  712. };
  713. struct perf_cgroup {
  714. struct cgroup_subsys_state css;
  715. struct perf_cgroup_info __percpu *info;
  716. };
  717. /*
  718. * Must ensure cgroup is pinned (css_get) before calling
  719. * this function. In other words, we cannot call this function
  720. * if there is no cgroup event for the current CPU context.
  721. */
  722. static inline struct perf_cgroup *
  723. perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
  724. {
  725. return container_of(task_css_check(task, perf_event_cgrp_id,
  726. ctx ? lockdep_is_held(&ctx->lock)
  727. : true),
  728. struct perf_cgroup, css);
  729. }
  730. #endif /* CONFIG_CGROUP_PERF */
  731. #ifdef CONFIG_PERF_EVENTS
  732. extern void *perf_aux_output_begin(struct perf_output_handle *handle,
  733. struct perf_event *event);
  734. extern void perf_aux_output_end(struct perf_output_handle *handle,
  735. unsigned long size, bool truncated);
  736. extern int perf_aux_output_skip(struct perf_output_handle *handle,
  737. unsigned long size);
  738. extern void *perf_get_aux(struct perf_output_handle *handle);
  739. extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
  740. extern void perf_pmu_unregister(struct pmu *pmu);
  741. extern int perf_num_counters(void);
  742. extern const char *perf_pmu_name(void);
  743. extern void __perf_event_task_sched_in(struct task_struct *prev,
  744. struct task_struct *task);
  745. extern void __perf_event_task_sched_out(struct task_struct *prev,
  746. struct task_struct *next);
  747. extern int perf_event_init_task(struct task_struct *child);
  748. extern void perf_event_exit_task(struct task_struct *child);
  749. extern void perf_event_free_task(struct task_struct *task);
  750. extern void perf_event_delayed_put(struct task_struct *task);
  751. extern struct file *perf_event_get(unsigned int fd);
  752. extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
  753. extern void perf_event_print_debug(void);
  754. extern void perf_pmu_disable(struct pmu *pmu);
  755. extern void perf_pmu_enable(struct pmu *pmu);
  756. extern void perf_sched_cb_dec(struct pmu *pmu);
  757. extern void perf_sched_cb_inc(struct pmu *pmu);
  758. extern int perf_event_task_disable(void);
  759. extern int perf_event_task_enable(void);
  760. extern int perf_event_refresh(struct perf_event *event, int refresh);
  761. extern void perf_event_update_userpage(struct perf_event *event);
  762. extern int perf_event_release_kernel(struct perf_event *event);
  763. extern struct perf_event *
  764. perf_event_create_kernel_counter(struct perf_event_attr *attr,
  765. int cpu,
  766. struct task_struct *task,
  767. perf_overflow_handler_t callback,
  768. void *context);
  769. extern void perf_pmu_migrate_context(struct pmu *pmu,
  770. int src_cpu, int dst_cpu);
  771. extern u64 perf_event_read_local(struct perf_event *event);
  772. extern u64 perf_event_read_value(struct perf_event *event,
  773. u64 *enabled, u64 *running);
  774. struct perf_sample_data {
  775. /*
  776. * Fields set by perf_sample_data_init(), group so as to
  777. * minimize the cachelines touched.
  778. */
  779. u64 addr;
  780. struct perf_raw_record *raw;
  781. struct perf_branch_stack *br_stack;
  782. u64 period;
  783. u64 weight;
  784. u64 txn;
  785. union perf_mem_data_src data_src;
  786. /*
  787. * The other fields, optionally {set,used} by
  788. * perf_{prepare,output}_sample().
  789. */
  790. u64 type;
  791. u64 ip;
  792. struct {
  793. u32 pid;
  794. u32 tid;
  795. } tid_entry;
  796. u64 time;
  797. u64 id;
  798. u64 stream_id;
  799. struct {
  800. u32 cpu;
  801. u32 reserved;
  802. } cpu_entry;
  803. struct perf_callchain_entry *callchain;
  804. /*
  805. * regs_user may point to task_pt_regs or to regs_user_copy, depending
  806. * on arch details.
  807. */
  808. struct perf_regs regs_user;
  809. struct pt_regs regs_user_copy;
  810. struct perf_regs regs_intr;
  811. u64 stack_user_size;
  812. } ____cacheline_aligned;
  813. /* default value for data source */
  814. #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
  815. PERF_MEM_S(LVL, NA) |\
  816. PERF_MEM_S(SNOOP, NA) |\
  817. PERF_MEM_S(LOCK, NA) |\
  818. PERF_MEM_S(TLB, NA))
  819. static inline void perf_sample_data_init(struct perf_sample_data *data,
  820. u64 addr, u64 period)
  821. {
  822. /* remaining struct members initialized in perf_prepare_sample() */
  823. data->addr = addr;
  824. data->raw = NULL;
  825. data->br_stack = NULL;
  826. data->period = period;
  827. data->weight = 0;
  828. data->data_src.val = PERF_MEM_NA;
  829. data->txn = 0;
  830. }
  831. extern void perf_output_sample(struct perf_output_handle *handle,
  832. struct perf_event_header *header,
  833. struct perf_sample_data *data,
  834. struct perf_event *event);
  835. extern void perf_prepare_sample(struct perf_event_header *header,
  836. struct perf_sample_data *data,
  837. struct perf_event *event,
  838. struct pt_regs *regs);
  839. extern int perf_event_overflow(struct perf_event *event,
  840. struct perf_sample_data *data,
  841. struct pt_regs *regs);
  842. extern void perf_event_output_forward(struct perf_event *event,
  843. struct perf_sample_data *data,
  844. struct pt_regs *regs);
  845. extern void perf_event_output_backward(struct perf_event *event,
  846. struct perf_sample_data *data,
  847. struct pt_regs *regs);
  848. extern void perf_event_output(struct perf_event *event,
  849. struct perf_sample_data *data,
  850. struct pt_regs *regs);
  851. static inline bool
  852. is_default_overflow_handler(struct perf_event *event)
  853. {
  854. if (likely(event->overflow_handler == perf_event_output_forward))
  855. return true;
  856. if (unlikely(event->overflow_handler == perf_event_output_backward))
  857. return true;
  858. return false;
  859. }
  860. extern void
  861. perf_event_header__init_id(struct perf_event_header *header,
  862. struct perf_sample_data *data,
  863. struct perf_event *event);
  864. extern void
  865. perf_event__output_id_sample(struct perf_event *event,
  866. struct perf_output_handle *handle,
  867. struct perf_sample_data *sample);
  868. extern void
  869. perf_log_lost_samples(struct perf_event *event, u64 lost);
  870. static inline bool is_sampling_event(struct perf_event *event)
  871. {
  872. return event->attr.sample_period != 0;
  873. }
  874. /*
  875. * Return 1 for a software event, 0 for a hardware event
  876. */
  877. static inline int is_software_event(struct perf_event *event)
  878. {
  879. return event->event_caps & PERF_EV_CAP_SOFTWARE;
  880. }
  881. extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
  882. extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
  883. extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
  884. #ifndef perf_arch_fetch_caller_regs
  885. static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
  886. #endif
  887. /*
  888. * Take a snapshot of the regs. Skip ip and frame pointer to
  889. * the nth caller. We only need a few of the regs:
  890. * - ip for PERF_SAMPLE_IP
  891. * - cs for user_mode() tests
  892. * - bp for callchains
  893. * - eflags, for future purposes, just in case
  894. */
  895. static inline void perf_fetch_caller_regs(struct pt_regs *regs)
  896. {
  897. perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
  898. }
  899. static __always_inline void
  900. perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
  901. {
  902. if (static_key_false(&perf_swevent_enabled[event_id]))
  903. __perf_sw_event(event_id, nr, regs, addr);
  904. }
  905. DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
  906. /*
  907. * 'Special' version for the scheduler, it hard assumes no recursion,
  908. * which is guaranteed by us not actually scheduling inside other swevents
  909. * because those disable preemption.
  910. */
  911. static __always_inline void
  912. perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
  913. {
  914. if (static_key_false(&perf_swevent_enabled[event_id])) {
  915. struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
  916. perf_fetch_caller_regs(regs);
  917. ___perf_sw_event(event_id, nr, regs, addr);
  918. }
  919. }
  920. extern struct static_key_false perf_sched_events;
  921. static __always_inline bool
  922. perf_sw_migrate_enabled(void)
  923. {
  924. if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
  925. return true;
  926. return false;
  927. }
  928. static inline void perf_event_task_migrate(struct task_struct *task)
  929. {
  930. if (perf_sw_migrate_enabled())
  931. task->sched_migrated = 1;
  932. }
  933. static inline void perf_event_task_sched_in(struct task_struct *prev,
  934. struct task_struct *task)
  935. {
  936. if (static_branch_unlikely(&perf_sched_events))
  937. __perf_event_task_sched_in(prev, task);
  938. if (perf_sw_migrate_enabled() && task->sched_migrated) {
  939. struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
  940. perf_fetch_caller_regs(regs);
  941. ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
  942. task->sched_migrated = 0;
  943. }
  944. }
  945. static inline void perf_event_task_sched_out(struct task_struct *prev,
  946. struct task_struct *next)
  947. {
  948. perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
  949. if (static_branch_unlikely(&perf_sched_events))
  950. __perf_event_task_sched_out(prev, next);
  951. }
  952. static inline u64 __perf_event_count(struct perf_event *event)
  953. {
  954. return local64_read(&event->count) + atomic64_read(&event->child_count);
  955. }
  956. extern void perf_event_mmap(struct vm_area_struct *vma);
  957. extern struct perf_guest_info_callbacks *perf_guest_cbs;
  958. extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
  959. extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
  960. extern void perf_event_exec(void);
  961. extern void perf_event_comm(struct task_struct *tsk, bool exec);
  962. extern void perf_event_fork(struct task_struct *tsk);
  963. /* Callchains */
  964. DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
  965. extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
  966. extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
  967. extern struct perf_callchain_entry *
  968. get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
  969. u32 max_stack, bool crosstask, bool add_mark);
  970. extern int get_callchain_buffers(int max_stack);
  971. extern void put_callchain_buffers(void);
  972. extern int sysctl_perf_event_max_stack;
  973. extern int sysctl_perf_event_max_contexts_per_stack;
  974. static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
  975. {
  976. if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
  977. struct perf_callchain_entry *entry = ctx->entry;
  978. entry->ip[entry->nr++] = ip;
  979. ++ctx->contexts;
  980. return 0;
  981. } else {
  982. ctx->contexts_maxed = true;
  983. return -1; /* no more room, stop walking the stack */
  984. }
  985. }
  986. static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
  987. {
  988. if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
  989. struct perf_callchain_entry *entry = ctx->entry;
  990. entry->ip[entry->nr++] = ip;
  991. ++ctx->nr;
  992. return 0;
  993. } else {
  994. return -1; /* no more room, stop walking the stack */
  995. }
  996. }
  997. extern int sysctl_perf_event_paranoid;
  998. extern int sysctl_perf_event_mlock;
  999. extern int sysctl_perf_event_sample_rate;
  1000. extern int sysctl_perf_cpu_time_max_percent;
  1001. extern void perf_sample_event_took(u64 sample_len_ns);
  1002. extern int perf_proc_update_handler(struct ctl_table *table, int write,
  1003. void __user *buffer, size_t *lenp,
  1004. loff_t *ppos);
  1005. extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
  1006. void __user *buffer, size_t *lenp,
  1007. loff_t *ppos);
  1008. int perf_event_max_stack_handler(struct ctl_table *table, int write,
  1009. void __user *buffer, size_t *lenp, loff_t *ppos);
  1010. static inline bool perf_paranoid_tracepoint_raw(void)
  1011. {
  1012. return sysctl_perf_event_paranoid > -1;
  1013. }
  1014. static inline bool perf_paranoid_cpu(void)
  1015. {
  1016. return sysctl_perf_event_paranoid > 0;
  1017. }
  1018. static inline bool perf_paranoid_kernel(void)
  1019. {
  1020. return sysctl_perf_event_paranoid > 1;
  1021. }
  1022. extern void perf_event_init(void);
  1023. extern void perf_tp_event(u16 event_type, u64 count, void *record,
  1024. int entry_size, struct pt_regs *regs,
  1025. struct hlist_head *head, int rctx,
  1026. struct task_struct *task);
  1027. extern void perf_bp_event(struct perf_event *event, void *data);
  1028. #ifndef perf_misc_flags
  1029. # define perf_misc_flags(regs) \
  1030. (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
  1031. # define perf_instruction_pointer(regs) instruction_pointer(regs)
  1032. #endif
  1033. static inline bool has_branch_stack(struct perf_event *event)
  1034. {
  1035. return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
  1036. }
  1037. static inline bool needs_branch_stack(struct perf_event *event)
  1038. {
  1039. return event->attr.branch_sample_type != 0;
  1040. }
  1041. static inline bool has_aux(struct perf_event *event)
  1042. {
  1043. return event->pmu->setup_aux;
  1044. }
  1045. static inline bool is_write_backward(struct perf_event *event)
  1046. {
  1047. return !!event->attr.write_backward;
  1048. }
  1049. static inline bool has_addr_filter(struct perf_event *event)
  1050. {
  1051. return event->pmu->nr_addr_filters;
  1052. }
  1053. /*
  1054. * An inherited event uses parent's filters
  1055. */
  1056. static inline struct perf_addr_filters_head *
  1057. perf_event_addr_filters(struct perf_event *event)
  1058. {
  1059. struct perf_addr_filters_head *ifh = &event->addr_filters;
  1060. if (event->parent)
  1061. ifh = &event->parent->addr_filters;
  1062. return ifh;
  1063. }
  1064. extern void perf_event_addr_filters_sync(struct perf_event *event);
  1065. extern int perf_output_begin(struct perf_output_handle *handle,
  1066. struct perf_event *event, unsigned int size);
  1067. extern int perf_output_begin_forward(struct perf_output_handle *handle,
  1068. struct perf_event *event,
  1069. unsigned int size);
  1070. extern int perf_output_begin_backward(struct perf_output_handle *handle,
  1071. struct perf_event *event,
  1072. unsigned int size);
  1073. extern void perf_output_end(struct perf_output_handle *handle);
  1074. extern unsigned int perf_output_copy(struct perf_output_handle *handle,
  1075. const void *buf, unsigned int len);
  1076. extern unsigned int perf_output_skip(struct perf_output_handle *handle,
  1077. unsigned int len);
  1078. extern int perf_swevent_get_recursion_context(void);
  1079. extern void perf_swevent_put_recursion_context(int rctx);
  1080. extern u64 perf_swevent_set_period(struct perf_event *event);
  1081. extern void perf_event_enable(struct perf_event *event);
  1082. extern void perf_event_disable(struct perf_event *event);
  1083. extern void perf_event_disable_local(struct perf_event *event);
  1084. extern void perf_event_task_tick(void);
  1085. #else /* !CONFIG_PERF_EVENTS: */
  1086. static inline void *
  1087. perf_aux_output_begin(struct perf_output_handle *handle,
  1088. struct perf_event *event) { return NULL; }
  1089. static inline void
  1090. perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
  1091. bool truncated) { }
  1092. static inline int
  1093. perf_aux_output_skip(struct perf_output_handle *handle,
  1094. unsigned long size) { return -EINVAL; }
  1095. static inline void *
  1096. perf_get_aux(struct perf_output_handle *handle) { return NULL; }
  1097. static inline void
  1098. perf_event_task_migrate(struct task_struct *task) { }
  1099. static inline void
  1100. perf_event_task_sched_in(struct task_struct *prev,
  1101. struct task_struct *task) { }
  1102. static inline void
  1103. perf_event_task_sched_out(struct task_struct *prev,
  1104. struct task_struct *next) { }
  1105. static inline int perf_event_init_task(struct task_struct *child) { return 0; }
  1106. static inline void perf_event_exit_task(struct task_struct *child) { }
  1107. static inline void perf_event_free_task(struct task_struct *task) { }
  1108. static inline void perf_event_delayed_put(struct task_struct *task) { }
  1109. static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
  1110. static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
  1111. {
  1112. return ERR_PTR(-EINVAL);
  1113. }
  1114. static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
  1115. static inline void perf_event_print_debug(void) { }
  1116. static inline int perf_event_task_disable(void) { return -EINVAL; }
  1117. static inline int perf_event_task_enable(void) { return -EINVAL; }
  1118. static inline int perf_event_refresh(struct perf_event *event, int refresh)
  1119. {
  1120. return -EINVAL;
  1121. }
  1122. static inline void
  1123. perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
  1124. static inline void
  1125. perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
  1126. static inline void
  1127. perf_bp_event(struct perf_event *event, void *data) { }
  1128. static inline int perf_register_guest_info_callbacks
  1129. (struct perf_guest_info_callbacks *callbacks) { return 0; }
  1130. static inline int perf_unregister_guest_info_callbacks
  1131. (struct perf_guest_info_callbacks *callbacks) { return 0; }
  1132. static inline void perf_event_mmap(struct vm_area_struct *vma) { }
  1133. static inline void perf_event_exec(void) { }
  1134. static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
  1135. static inline void perf_event_fork(struct task_struct *tsk) { }
  1136. static inline void perf_event_init(void) { }
  1137. static inline int perf_swevent_get_recursion_context(void) { return -1; }
  1138. static inline void perf_swevent_put_recursion_context(int rctx) { }
  1139. static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
  1140. static inline void perf_event_enable(struct perf_event *event) { }
  1141. static inline void perf_event_disable(struct perf_event *event) { }
  1142. static inline int __perf_event_disable(void *info) { return -1; }
  1143. static inline void perf_event_task_tick(void) { }
  1144. static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
  1145. #endif
  1146. #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
  1147. extern void perf_restore_debug_store(void);
  1148. #else
  1149. static inline void perf_restore_debug_store(void) { }
  1150. #endif
  1151. static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
  1152. {
  1153. return frag->pad < sizeof(u64);
  1154. }
  1155. #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
  1156. struct perf_pmu_events_attr {
  1157. struct device_attribute attr;
  1158. u64 id;
  1159. const char *event_str;
  1160. };
  1161. struct perf_pmu_events_ht_attr {
  1162. struct device_attribute attr;
  1163. u64 id;
  1164. const char *event_str_ht;
  1165. const char *event_str_noht;
  1166. };
  1167. ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
  1168. char *page);
  1169. #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
  1170. static struct perf_pmu_events_attr _var = { \
  1171. .attr = __ATTR(_name, 0444, _show, NULL), \
  1172. .id = _id, \
  1173. };
  1174. #define PMU_EVENT_ATTR_STRING(_name, _var, _str) \
  1175. static struct perf_pmu_events_attr _var = { \
  1176. .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
  1177. .id = 0, \
  1178. .event_str = _str, \
  1179. };
  1180. #define PMU_FORMAT_ATTR(_name, _format) \
  1181. static ssize_t \
  1182. _name##_show(struct device *dev, \
  1183. struct device_attribute *attr, \
  1184. char *page) \
  1185. { \
  1186. BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
  1187. return sprintf(page, _format "\n"); \
  1188. } \
  1189. \
  1190. static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
  1191. /* Performance counter hotplug functions */
  1192. #ifdef CONFIG_PERF_EVENTS
  1193. int perf_event_init_cpu(unsigned int cpu);
  1194. int perf_event_exit_cpu(unsigned int cpu);
  1195. #else
  1196. #define perf_event_init_cpu NULL
  1197. #define perf_event_exit_cpu NULL
  1198. #endif
  1199. #endif /* _LINUX_PERF_EVENT_H */