/*
 * perf.h — private header for the perf tools: per-architecture memory
 * barriers, the perf_event_open() syscall wrapper, and record options.
 */
#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <asm/unistd.h>

/*
 * Per-architecture memory-ordering primitives (mb/wmb/rmb), cpu_relax(),
 * the /proc/cpuinfo key that names the CPU model (CPUINFO_PROC), and a
 * fallback __NR_perf_event_open syscall number for toolchains whose
 * <asm/unistd.h> predates the perf_event_open syscall.
 */
#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
/* NOTE(review): trailing ';' makes every cpu_relax(); use expand to ";;". */
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#define mb() asm volatile("mfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
/* NOTE(review): trailing ';' makes every cpu_relax(); use expand to ";;". */
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __s390__
/* bcr 15,0 is the s390 full serialization instruction. */
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#endif

#ifdef __sh__
/* Only SH-4A/SH-5 have a hardware barrier; older SH gets a compiler barrier. */
#if defined(__SH4A__) || defined(__SH5__)
# define mb() asm volatile("synco" ::: "memory")
# define wmb() asm volatile("synco" ::: "memory")
# define rmb() asm volatile("synco" ::: "memory")
#else
# define mb() asm volatile("" ::: "memory")
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC "cpu type"
#endif

#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __sparc__
#ifdef __LP64__
#define mb() asm volatile("ba,pt %%xcc, 1f\n" \
			  "membar #StoreLoad\n" \
			  "1:\n":::"memory")
#else
#define mb() asm volatile("":::"memory")
#endif
#define wmb() asm volatile("":::"memory")
#define rmb() asm volatile("":::"memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __ia64__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 * (0xffff0fa0 is the fixed kuser helper address.)
 */
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC "Processor"
#endif

#ifdef __aarch64__
/* NOTE(review): no CPUINFO_PROC is defined for aarch64 — confirm intended. */
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
/* Temporarily switch to the mips2 ISA so 'sync' assembles on mips1 builds. */
#define mb() asm volatile( \
		".set mips2\n\t" \
		"sync\n\t" \
		".set mips0" \
		: /* no output */ \
		: /* no input */ \
		: "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "Processor"
#endif

#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "CPU"
#endif

#ifdef __xtensa__
#define mb() asm volatile("memw" ::: "memory")
#define wmb() asm volatile("memw" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "core ID"
#endif

/* Compiler-only barrier: prevents reordering across it, emits no code. */
#define barrier() asm volatile ("" ::: "memory")

/* Default cpu_relax() for architectures that did not define one above. */
#ifndef cpu_relax
#define cpu_relax() barrier()
#endif

/* Force exactly one access to x by going through a volatile lvalue. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
  130. #include <time.h>
  131. #include <unistd.h>
  132. #include <sys/types.h>
  133. #include <sys/syscall.h>
  134. #include <linux/perf_event.h>
  135. #include "util/types.h"
  136. #include <stdbool.h>
/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32

/* Time-unit constants; guarded because kernel headers may define them too. */
#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif
#ifndef NSEC_PER_USEC
# define NSEC_PER_USEC 1000ULL
#endif
  149. static inline unsigned long long rdclock(void)
  150. {
  151. struct timespec ts;
  152. clock_gettime(CLOCK_MONOTONIC, &ts);
  153. return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
  154. }
/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

/* Branch-prediction hint: the condition is expected to be false. */
#define unlikely(x) __builtin_expect(!!(x), 0)

/*
 * Type-safe min(): each argument is evaluated exactly once, and the
 * (void)(&_min1 == &_min2) comparison makes the compiler warn when x
 * and y have incompatible types.
 */
#define min(x, y) ({ \
	typeof(x) _min1 = (x); \
	typeof(y) _min2 = (y); \
	(void) (&_min1 == &_min2); \
	_min1 < _min2 ? _min1 : _min2; })
/*
 * 'perf test' attr-validation hooks: when test_attr__enabled is set,
 * sys_perf_event_open() below reports every open via test_attr__open().
 */
extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags);
  170. static inline int
  171. sys_perf_event_open(struct perf_event_attr *attr,
  172. pid_t pid, int cpu, int group_fd,
  173. unsigned long flags)
  174. {
  175. int fd;
  176. fd = syscall(__NR_perf_event_open, attr, pid, cpu,
  177. group_fd, flags);
  178. if (unlikely(test_attr__enabled))
  179. test_attr__open(attr, pid, cpu, fd, group_fd, flags);
  180. return fd;
  181. }
/* Compile-time limits on tracked events and CPUs. */
#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256
  184. struct ip_callchain {
  185. u64 nr;
  186. u64 ips[0];
  187. };
/* Per-branch predicate bits; layout mirrors perf_branch_entry flags. */
struct branch_flags {
	u64 mispred:1;		/* branch target was mispredicted */
	u64 predicted:1;	/* branch target was predicted */
	u64 in_tx:1;		/* branch occurred in a HW transaction */
	u64 abort:1;		/* branch was a transaction abort */
	u64 reserved:60;
};
/* One taken branch: source address, target address, predicate flags. */
struct branch_entry {
	u64 from;
	u64 to;
	struct branch_flags flags;
};
  200. struct branch_stack {
  201. u64 nr;
  202. struct branch_entry entries[0];
  203. };
/* Globals defined elsewhere in the perf tools. */
extern const char *input_name;		/* presumably the input data file — confirm at definition site */
extern bool perf_host, perf_guest;	/* host/guest profiling enabled flags — TODO confirm */
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"
/* How call chains are collected for samples, if at all. */
enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,		/* frame-pointer walking */
	CALLCHAIN_DWARF		/* DWARF-based unwinding */
};
/*
 * Options for a 'perf record' session. NOTE(review): field meanings
 * below are inferred from their names — confirm against the record
 * command's option table before relying on them.
 */
struct record_opts {
	struct target target;		/* what to profile (pids/tids/cpus) */
	int call_graph;			/* an enum perf_call_graph_mode value */
	bool group;
	bool inherit_stat;
	bool no_buffering;
	bool no_inherit;
	bool no_inherit_set;		/* true once no_inherit was set explicitly */
	bool no_samples;
	bool raw_samples;
	bool sample_address;
	bool sample_weight;
	bool sample_time;
	bool period;
	unsigned int freq;		/* default sampling frequency */
	unsigned int mmap_pages;	/* ring-buffer size in pages */
	unsigned int user_freq;		/* user-requested frequency override */
	u64 branch_stack;		/* PERF_SAMPLE_BRANCH_* selection bits */
	u64 default_interval;
	u64 user_interval;		/* user-requested period override */
	u16 stack_dump_size;		/* bytes of user stack for DWARF unwind */
	bool sample_transaction;
	unsigned initial_delay;
};
  238. #endif