/*
 * perf-sys.h — per-architecture memory barriers, cpu_relax(), CPUINFO_PROC
 * field names, and the perf_event_open() syscall wrapper for perf tools.
 */
  1. #ifndef _PERF_SYS_H
  2. #define _PERF_SYS_H
  3. #include <unistd.h>
  4. #include <sys/types.h>
  5. #include <sys/syscall.h>
  6. #include <linux/types.h>
  7. #include <linux/perf_event.h>
  8. #if defined(__i386__)
  9. #define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
  10. #define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
  11. #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
  12. #define cpu_relax() asm volatile("rep; nop" ::: "memory");
  13. #define CPUINFO_PROC {"model name"}
  14. #ifndef __NR_perf_event_open
  15. # define __NR_perf_event_open 336
  16. #endif
  17. #ifndef __NR_futex
  18. # define __NR_futex 240
  19. #endif
  20. #ifndef __NR_gettid
  21. # define __NR_gettid 224
  22. #endif
  23. #endif
  24. #if defined(__x86_64__)
  25. #define mb() asm volatile("mfence" ::: "memory")
  26. #define wmb() asm volatile("sfence" ::: "memory")
  27. #define rmb() asm volatile("lfence" ::: "memory")
  28. #define cpu_relax() asm volatile("rep; nop" ::: "memory");
  29. #define CPUINFO_PROC {"model name"}
  30. #ifndef __NR_perf_event_open
  31. # define __NR_perf_event_open 298
  32. #endif
  33. #ifndef __NR_futex
  34. # define __NR_futex 202
  35. #endif
  36. #ifndef __NR_gettid
  37. # define __NR_gettid 186
  38. #endif
  39. #endif
  40. #ifdef __powerpc__
  41. #include "../../arch/powerpc/include/uapi/asm/unistd.h"
  42. #define mb() asm volatile ("sync" ::: "memory")
  43. #define wmb() asm volatile ("sync" ::: "memory")
  44. #define rmb() asm volatile ("sync" ::: "memory")
  45. #define CPUINFO_PROC {"cpu"}
  46. #endif
  47. #ifdef __s390__
  48. #define mb() asm volatile("bcr 15,0" ::: "memory")
  49. #define wmb() asm volatile("bcr 15,0" ::: "memory")
  50. #define rmb() asm volatile("bcr 15,0" ::: "memory")
  51. #define CPUINFO_PROC {"vendor_id"}
  52. #endif
  53. #ifdef __sh__
  54. #if defined(__SH4A__) || defined(__SH5__)
  55. # define mb() asm volatile("synco" ::: "memory")
  56. # define wmb() asm volatile("synco" ::: "memory")
  57. # define rmb() asm volatile("synco" ::: "memory")
  58. #else
  59. # define mb() asm volatile("" ::: "memory")
  60. # define wmb() asm volatile("" ::: "memory")
  61. # define rmb() asm volatile("" ::: "memory")
  62. #endif
  63. #define CPUINFO_PROC {"cpu type"}
  64. #endif
  65. #ifdef __hppa__
  66. #define mb() asm volatile("" ::: "memory")
  67. #define wmb() asm volatile("" ::: "memory")
  68. #define rmb() asm volatile("" ::: "memory")
  69. #define CPUINFO_PROC {"cpu"}
  70. #endif
  71. #ifdef __sparc__
  72. #ifdef __LP64__
  73. #define mb() asm volatile("ba,pt %%xcc, 1f\n" \
  74. "membar #StoreLoad\n" \
  75. "1:\n":::"memory")
  76. #else
  77. #define mb() asm volatile("":::"memory")
  78. #endif
  79. #define wmb() asm volatile("":::"memory")
  80. #define rmb() asm volatile("":::"memory")
  81. #define CPUINFO_PROC {"cpu"}
  82. #endif
  83. #ifdef __alpha__
  84. #define mb() asm volatile("mb" ::: "memory")
  85. #define wmb() asm volatile("wmb" ::: "memory")
  86. #define rmb() asm volatile("mb" ::: "memory")
  87. #define CPUINFO_PROC {"cpu model"}
  88. #endif
  89. #ifdef __ia64__
  90. #define mb() asm volatile ("mf" ::: "memory")
  91. #define wmb() asm volatile ("mf" ::: "memory")
  92. #define rmb() asm volatile ("mf" ::: "memory")
  93. #define cpu_relax() asm volatile ("hint @pause" ::: "memory")
  94. #define CPUINFO_PROC {"model name"}
  95. #endif
  96. #ifdef __arm__
  97. /*
  98. * Use the __kuser_memory_barrier helper in the CPU helper page. See
  99. * arch/arm/kernel/entry-armv.S in the kernel source for details.
  100. */
  101. #define mb() ((void(*)(void))0xffff0fa0)()
  102. #define wmb() ((void(*)(void))0xffff0fa0)()
  103. #define rmb() ((void(*)(void))0xffff0fa0)()
  104. #define CPUINFO_PROC {"model name", "Processor"}
  105. #endif
  106. #ifdef __aarch64__
  107. #define mb() asm volatile("dmb ish" ::: "memory")
  108. #define wmb() asm volatile("dmb ishst" ::: "memory")
  109. #define rmb() asm volatile("dmb ishld" ::: "memory")
  110. #define cpu_relax() asm volatile("yield" ::: "memory")
  111. #endif
  112. #ifdef __mips__
  113. #define mb() asm volatile( \
  114. ".set mips2\n\t" \
  115. "sync\n\t" \
  116. ".set mips0" \
  117. : /* no output */ \
  118. : /* no input */ \
  119. : "memory")
  120. #define wmb() mb()
  121. #define rmb() mb()
  122. #define CPUINFO_PROC {"cpu model"}
  123. #endif
  124. #ifdef __arc__
  125. #define mb() asm volatile("" ::: "memory")
  126. #define wmb() asm volatile("" ::: "memory")
  127. #define rmb() asm volatile("" ::: "memory")
  128. #define CPUINFO_PROC {"Processor"}
  129. #endif
  130. #ifdef __metag__
  131. #define mb() asm volatile("" ::: "memory")
  132. #define wmb() asm volatile("" ::: "memory")
  133. #define rmb() asm volatile("" ::: "memory")
  134. #define CPUINFO_PROC {"CPU"}
  135. #endif
  136. #ifdef __xtensa__
  137. #define mb() asm volatile("memw" ::: "memory")
  138. #define wmb() asm volatile("memw" ::: "memory")
  139. #define rmb() asm volatile("" ::: "memory")
  140. #define CPUINFO_PROC {"core ID"}
  141. #endif
  142. #ifdef __tile__
  143. #define mb() asm volatile ("mf" ::: "memory")
  144. #define wmb() asm volatile ("mf" ::: "memory")
  145. #define rmb() asm volatile ("mf" ::: "memory")
  146. #define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
  147. #define CPUINFO_PROC {"model name"}
  148. #endif
  149. #define barrier() asm volatile ("" ::: "memory")
  150. #ifndef cpu_relax
  151. #define cpu_relax() barrier()
  152. #endif
  153. static inline int
  154. sys_perf_event_open(struct perf_event_attr *attr,
  155. pid_t pid, int cpu, int group_fd,
  156. unsigned long flags)
  157. {
  158. int fd;
  159. fd = syscall(__NR_perf_event_open, attr, pid, cpu,
  160. group_fd, flags);
  161. #ifdef HAVE_ATTR_TEST
  162. if (unlikely(test_attr__enabled))
  163. test_attr__open(attr, pid, cpu, fd, group_fd, flags);
  164. #endif
  165. return fd;
  166. }
  167. #endif /* _PERF_SYS_H */