/*
 * perf-sys.h - raw perf_event_open(2) syscall wrapper and per-architecture
 * memory-barrier / cpu_relax helpers used by the perf tool.
 */
  1. #ifndef _PERF_SYS_H
  2. #define _PERF_SYS_H
  3. #include <unistd.h>
  4. #include <sys/types.h>
  5. #include <sys/syscall.h>
  6. #include <linux/types.h>
  7. #include <linux/perf_event.h>
  8. #include <asm/unistd.h>
  9. #if defined(__i386__)
  10. #define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
  11. #define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
  12. #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
  13. #define cpu_relax() asm volatile("rep; nop" ::: "memory");
  14. #define CPUINFO_PROC "model name"
  15. #ifndef __NR_perf_event_open
  16. # define __NR_perf_event_open 336
  17. #endif
  18. #ifndef __NR_futex
  19. # define __NR_futex 240
  20. #endif
  21. #ifndef __NR_gettid
  22. # define __NR_gettid 224
  23. #endif
  24. #endif
  25. #if defined(__x86_64__)
  26. #define mb() asm volatile("mfence" ::: "memory")
  27. #define wmb() asm volatile("sfence" ::: "memory")
  28. #define rmb() asm volatile("lfence" ::: "memory")
  29. #define cpu_relax() asm volatile("rep; nop" ::: "memory");
  30. #define CPUINFO_PROC "model name"
  31. #ifndef __NR_perf_event_open
  32. # define __NR_perf_event_open 298
  33. #endif
  34. #ifndef __NR_futex
  35. # define __NR_futex 202
  36. #endif
  37. #ifndef __NR_gettid
  38. # define __NR_gettid 186
  39. #endif
  40. #endif
  41. #ifdef __powerpc__
  42. #include "../../arch/powerpc/include/uapi/asm/unistd.h"
  43. #define mb() asm volatile ("sync" ::: "memory")
  44. #define wmb() asm volatile ("sync" ::: "memory")
  45. #define rmb() asm volatile ("sync" ::: "memory")
  46. #define CPUINFO_PROC "cpu"
  47. #endif
  48. #ifdef __s390__
  49. #define mb() asm volatile("bcr 15,0" ::: "memory")
  50. #define wmb() asm volatile("bcr 15,0" ::: "memory")
  51. #define rmb() asm volatile("bcr 15,0" ::: "memory")
  52. #define CPUINFO_PROC "vendor_id"
  53. #endif
  54. #ifdef __sh__
  55. #if defined(__SH4A__) || defined(__SH5__)
  56. # define mb() asm volatile("synco" ::: "memory")
  57. # define wmb() asm volatile("synco" ::: "memory")
  58. # define rmb() asm volatile("synco" ::: "memory")
  59. #else
  60. # define mb() asm volatile("" ::: "memory")
  61. # define wmb() asm volatile("" ::: "memory")
  62. # define rmb() asm volatile("" ::: "memory")
  63. #endif
  64. #define CPUINFO_PROC "cpu type"
  65. #endif
  66. #ifdef __hppa__
  67. #define mb() asm volatile("" ::: "memory")
  68. #define wmb() asm volatile("" ::: "memory")
  69. #define rmb() asm volatile("" ::: "memory")
  70. #define CPUINFO_PROC "cpu"
  71. #endif
  72. #ifdef __sparc__
  73. #ifdef __LP64__
  74. #define mb() asm volatile("ba,pt %%xcc, 1f\n" \
  75. "membar #StoreLoad\n" \
  76. "1:\n":::"memory")
  77. #else
  78. #define mb() asm volatile("":::"memory")
  79. #endif
  80. #define wmb() asm volatile("":::"memory")
  81. #define rmb() asm volatile("":::"memory")
  82. #define CPUINFO_PROC "cpu"
  83. #endif
  84. #ifdef __alpha__
  85. #define mb() asm volatile("mb" ::: "memory")
  86. #define wmb() asm volatile("wmb" ::: "memory")
  87. #define rmb() asm volatile("mb" ::: "memory")
  88. #define CPUINFO_PROC "cpu model"
  89. #endif
  90. #ifdef __ia64__
  91. #define mb() asm volatile ("mf" ::: "memory")
  92. #define wmb() asm volatile ("mf" ::: "memory")
  93. #define rmb() asm volatile ("mf" ::: "memory")
  94. #define cpu_relax() asm volatile ("hint @pause" ::: "memory")
  95. #define CPUINFO_PROC "model name"
  96. #endif
  97. #ifdef __arm__
  98. /*
  99. * Use the __kuser_memory_barrier helper in the CPU helper page. See
  100. * arch/arm/kernel/entry-armv.S in the kernel source for details.
  101. */
  102. #define mb() ((void(*)(void))0xffff0fa0)()
  103. #define wmb() ((void(*)(void))0xffff0fa0)()
  104. #define rmb() ((void(*)(void))0xffff0fa0)()
  105. #define CPUINFO_PROC "Processor"
  106. #endif
  107. #ifdef __aarch64__
  108. #define mb() asm volatile("dmb ish" ::: "memory")
  109. #define wmb() asm volatile("dmb ishst" ::: "memory")
  110. #define rmb() asm volatile("dmb ishld" ::: "memory")
  111. #define cpu_relax() asm volatile("yield" ::: "memory")
  112. #endif
  113. #ifdef __mips__
  114. #define mb() asm volatile( \
  115. ".set mips2\n\t" \
  116. "sync\n\t" \
  117. ".set mips0" \
  118. : /* no output */ \
  119. : /* no input */ \
  120. : "memory")
  121. #define wmb() mb()
  122. #define rmb() mb()
  123. #define CPUINFO_PROC "cpu model"
  124. #endif
  125. #ifdef __arc__
  126. #define mb() asm volatile("" ::: "memory")
  127. #define wmb() asm volatile("" ::: "memory")
  128. #define rmb() asm volatile("" ::: "memory")
  129. #define CPUINFO_PROC "Processor"
  130. #endif
  131. #ifdef __metag__
  132. #define mb() asm volatile("" ::: "memory")
  133. #define wmb() asm volatile("" ::: "memory")
  134. #define rmb() asm volatile("" ::: "memory")
  135. #define CPUINFO_PROC "CPU"
  136. #endif
  137. #ifdef __xtensa__
  138. #define mb() asm volatile("memw" ::: "memory")
  139. #define wmb() asm volatile("memw" ::: "memory")
  140. #define rmb() asm volatile("" ::: "memory")
  141. #define CPUINFO_PROC "core ID"
  142. #endif
  143. #ifdef __tile__
  144. #define mb() asm volatile ("mf" ::: "memory")
  145. #define wmb() asm volatile ("mf" ::: "memory")
  146. #define rmb() asm volatile ("mf" ::: "memory")
  147. #define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
  148. #define CPUINFO_PROC "model name"
  149. #endif
  150. #define barrier() asm volatile ("" ::: "memory")
  151. #ifndef cpu_relax
  152. #define cpu_relax() barrier()
  153. #endif
  154. static inline int
  155. sys_perf_event_open(struct perf_event_attr *attr,
  156. pid_t pid, int cpu, int group_fd,
  157. unsigned long flags)
  158. {
  159. int fd;
  160. fd = syscall(__NR_perf_event_open, attr, pid, cpu,
  161. group_fd, flags);
  162. #ifdef HAVE_ATTR_TEST
  163. if (unlikely(test_attr__enabled))
  164. test_attr__open(attr, pid, cpu, fd, group_fd, flags);
  165. #endif
  166. return fd;
  167. }
  168. #endif /* _PERF_SYS_H */