/* tracex6_user.c — user-space loader/driver for the tracex6 BPF sample */
  1. #define _GNU_SOURCE
  2. #include <assert.h>
  3. #include <fcntl.h>
  4. #include <linux/perf_event.h>
  5. #include <linux/bpf.h>
  6. #include <sched.h>
  7. #include <stdio.h>
  8. #include <stdlib.h>
  9. #include <sys/ioctl.h>
  10. #include <sys/resource.h>
  11. #include <sys/time.h>
  12. #include <sys/types.h>
  13. #include <sys/wait.h>
  14. #include <unistd.h>
  15. #include "bpf_load.h"
  16. #include "libbpf.h"
  17. #include "perf-sys.h"
  18. #define SAMPLE_PERIOD 0x7fffffffffffffffULL
/*
 * Runs in a forked child process: pin ourselves to @cpu, open a perf event
 * there, publish its fd into the BPF perf_event_array (map_fd[0]), trigger
 * the kprobe'd BPF program, and read back the counter value the program
 * stored in map_fd[1] (keyed by CPU). Never returns — exits the child with
 * 0 on success, 1 on failure.
 */
static void check_on_cpu(int cpu, struct perf_event_attr *attr)
{
	int pmu_fd, error = 0;
	cpu_set_t set;
	__u64 value;

	/* Move to target CPU */
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	assert(sched_setaffinity(0, sizeof(set), &set) == 0);
	/* Open perf event and attach to the perf_event_array */
	pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/, -1/*group_fd*/, 0);
	if (pmu_fd < 0) {
		fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
		error = 1;
		goto on_exit;
	}
	assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
	/*
	 * Trigger the kprobe: the call itself is what matters (the attached
	 * BPF program fires and records the counter into map_fd[1]); the
	 * NULL next_key means we ignore the call's own result.
	 */
	bpf_map_get_next_key(map_fd[1], &cpu, NULL);
	/* Check the value the BPF program stored for this CPU */
	if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
		fprintf(stderr, "Value missing for CPU %d\n", cpu);
		error = 1;
		goto on_exit;
	}
	fprintf(stderr, "CPU %d: %llu\n", cpu, value);
on_exit:
	/*
	 * Cleanup in fixed order. The "|| error" disarms each assert once a
	 * failure was already recorded, so cleanup on a partially set-up
	 * state (e.g. invalid pmu_fd after a failed open) doesn't abort and
	 * mask the original error.
	 */
	assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0) == 0 || error);
	assert(close(pmu_fd) == 0 || error);
	assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
	exit(error);
}
  53. static void test_perf_event_array(struct perf_event_attr *attr,
  54. const char *name)
  55. {
  56. int i, status, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
  57. pid_t pid[nr_cpus];
  58. int err = 0;
  59. printf("Test reading %s counters\n", name);
  60. for (i = 0; i < nr_cpus; i++) {
  61. pid[i] = fork();
  62. assert(pid[i] >= 0);
  63. if (pid[i] == 0) {
  64. check_on_cpu(i, attr);
  65. exit(1);
  66. }
  67. }
  68. for (i = 0; i < nr_cpus; i++) {
  69. assert(waitpid(pid[i], &status, 0) == pid[i]);
  70. err |= status;
  71. }
  72. if (err)
  73. printf("Test: %s FAILED\n", name);
  74. }
  75. static void test_bpf_perf_event(void)
  76. {
  77. struct perf_event_attr attr_cycles = {
  78. .freq = 0,
  79. .sample_period = SAMPLE_PERIOD,
  80. .inherit = 0,
  81. .type = PERF_TYPE_HARDWARE,
  82. .read_format = 0,
  83. .sample_type = 0,
  84. .config = PERF_COUNT_HW_CPU_CYCLES,
  85. };
  86. struct perf_event_attr attr_clock = {
  87. .freq = 0,
  88. .sample_period = SAMPLE_PERIOD,
  89. .inherit = 0,
  90. .type = PERF_TYPE_SOFTWARE,
  91. .read_format = 0,
  92. .sample_type = 0,
  93. .config = PERF_COUNT_SW_CPU_CLOCK,
  94. };
  95. struct perf_event_attr attr_raw = {
  96. .freq = 0,
  97. .sample_period = SAMPLE_PERIOD,
  98. .inherit = 0,
  99. .type = PERF_TYPE_RAW,
  100. .read_format = 0,
  101. .sample_type = 0,
  102. /* Intel Instruction Retired */
  103. .config = 0xc0,
  104. };
  105. struct perf_event_attr attr_l1d_load = {
  106. .freq = 0,
  107. .sample_period = SAMPLE_PERIOD,
  108. .inherit = 0,
  109. .type = PERF_TYPE_HW_CACHE,
  110. .read_format = 0,
  111. .sample_type = 0,
  112. .config =
  113. PERF_COUNT_HW_CACHE_L1D |
  114. (PERF_COUNT_HW_CACHE_OP_READ << 8) |
  115. (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
  116. };
  117. struct perf_event_attr attr_llc_miss = {
  118. .freq = 0,
  119. .sample_period = SAMPLE_PERIOD,
  120. .inherit = 0,
  121. .type = PERF_TYPE_HW_CACHE,
  122. .read_format = 0,
  123. .sample_type = 0,
  124. .config =
  125. PERF_COUNT_HW_CACHE_LL |
  126. (PERF_COUNT_HW_CACHE_OP_READ << 8) |
  127. (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
  128. };
  129. struct perf_event_attr attr_msr_tsc = {
  130. .freq = 0,
  131. .sample_period = 0,
  132. .inherit = 0,
  133. /* From /sys/bus/event_source/devices/msr/ */
  134. .type = 7,
  135. .read_format = 0,
  136. .sample_type = 0,
  137. .config = 0,
  138. };
  139. test_perf_event_array(&attr_cycles, "HARDWARE-cycles");
  140. test_perf_event_array(&attr_clock, "SOFTWARE-clock");
  141. test_perf_event_array(&attr_raw, "RAW-instruction-retired");
  142. test_perf_event_array(&attr_l1d_load, "HW_CACHE-L1D-load");
  143. /* below tests may fail in qemu */
  144. test_perf_event_array(&attr_llc_miss, "HW_CACHE-LLC-miss");
  145. test_perf_event_array(&attr_msr_tsc, "Dynamic-msr-tsc");
  146. }
  147. int main(int argc, char **argv)
  148. {
  149. struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
  150. char filename[256];
  151. snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
  152. setrlimit(RLIMIT_MEMLOCK, &r);
  153. if (load_bpf_file(filename)) {
  154. printf("%s", bpf_log_buf);
  155. return 1;
  156. }
  157. test_bpf_perf_event();
  158. return 0;
  159. }