@@ -38,8 +38,10 @@ typedef __u16 __sum16;
 #include "bpf_util.h"
 #include "bpf_endian.h"
 #include "bpf_rlimit.h"
+#include "trace_helpers.h"
 
 static int error_cnt, pass_cnt;
+static bool jit_enabled;
 
 #define MAGIC_BYTES 123
 
@@ -391,13 +393,31 @@ static inline __u64 ptr_to_u64(const void *ptr)
 	return (__u64) (unsigned long) ptr;
 }
 
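+/* JIT is on when /proc/sys/net/core/bpf_jit_enable holds a non-zero value. */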
+static bool is_jit_enabled(void)
+{
+	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
+	bool enabled = false;
+	int sysctl_fd;
+
+	sysctl_fd = open(jit_sysctl, O_RDONLY);
+	if (sysctl_fd != -1) {
+		char tmpc;
+
+		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
+			enabled = (tmpc != '0');
+		close(sysctl_fd);
+	}
+
+	return enabled;
+}
+
 static void test_bpf_obj_id(void)
 {
 	const __u64 array_magic_value = 0xfaceb00c;
 	const __u32 array_key = 0;
 	const int nr_iters = 2;
 	const char *file = "./test_obj_id.o";
-	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
 	const char *expected_prog_name = "test_obj_id";
 	const char *expected_map_name = "test_map_id";
 	const __u64 nsec_per_sec = 1000000000;
@@ -414,20 +434,11 @@ static void test_bpf_obj_id(void)
 	char jited_insns[128], xlated_insns[128], zeros[128];
 	__u32 i, next_id, info_len, nr_id_found, duration = 0;
 	struct timespec real_time_ts, boot_time_ts;
-	int sysctl_fd, jit_enabled = 0, err = 0;
+	int err = 0;
 	__u64 array_value;
 	uid_t my_uid = getuid();
 	time_t now, load_time;
 
-	sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
-	if (sysctl_fd != -1) {
-		char tmpc;
-
-		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
-			jit_enabled = (tmpc != '0');
-		close(sysctl_fd);
-	}
-
 	err = bpf_prog_get_fd_by_id(0);
 	CHECK(err >= 0 || errno != ENOENT,
 	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
@@ -1204,8 +1215,151 @@ out:
 	return;
 }
 
+#define MAX_CNT_RAWTP 10ull
+#define MAX_STACK_RAWTP 100
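+/* Sample layout emitted by test_get_stack_rawtp.o through "perfmap". */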
+struct get_stack_trace_t {
+	int pid;
+	int kern_stack_size;
+	int user_stack_size;
+	int user_stack_buildid_size;
+	__u64 kern_stack[MAX_STACK_RAWTP];
+	__u64 user_stack[MAX_STACK_RAWTP];
+	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
+};
+
+static int get_stack_print_output(void *data, int size)
+{
+	bool good_kern_stack = false, good_user_stack = false;
+	const char *nonjit_func = "___bpf_prog_run";
+	struct get_stack_trace_t *e = data;
+	int i, num_stack;
+	static __u64 cnt;
+	struct ksym *ks;
+
+	cnt++;
+
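+	/* A sample shorter than the full struct carries raw stack addresses only. */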
+	if (size < sizeof(struct get_stack_trace_t)) {
+		__u64 *raw_data = data;
+		bool found = false;
+
+		num_stack = size / sizeof(__u64);
+		/* If jit is enabled, we do not have a good way to
+		 * verify the sanity of the kernel stack. So we
+		 * just assume it is good if the stack is not empty.
+		 * This could be improved in the future.
+		 */
+		if (jit_enabled) {
+			found = num_stack > 0;
+		} else {
+			for (i = 0; i < num_stack; i++) {
+				ks = ksym_search(raw_data[i]);
+				if (strcmp(ks->name, nonjit_func) == 0) {
+					found = true;
+					break;
+				}
+			}
+		}
+		if (found) {
+			good_kern_stack = true;
+			good_user_stack = true;
+		}
+	} else {
+		num_stack = e->kern_stack_size / sizeof(__u64);
+		if (jit_enabled) {
+			good_kern_stack = num_stack > 0;
+		} else {
+			for (i = 0; i < num_stack; i++) {
+				ks = ksym_search(e->kern_stack[i]);
+				if (strcmp(ks->name, nonjit_func) == 0) {
+					good_kern_stack = true;
+					break;
+				}
+			}
+		}
+		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
+			good_user_stack = true;
+	}
+	if (!good_kern_stack || !good_user_stack)
+		return PERF_EVENT_ERROR;
+
+	if (cnt == MAX_CNT_RAWTP)
+		return PERF_EVENT_DONE;
+
+	return PERF_EVENT_CONT;
+}
+
+static void test_get_stack_raw_tp(void)
+{
+	const char *file = "./test_get_stack_rawtp.o";
+	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
+	struct perf_event_attr attr = {};
+	struct timespec tv = {0, 10};
+	__u32 key = 0, duration = 0;
+	struct bpf_object *obj;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+		return;
+
+	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+		goto close_prog;
+
+	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
+	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
+		  perfmap_fd, errno))
+		goto close_prog;
+
+	err = load_kallsyms();
+	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
+		goto close_prog;
+
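+	/* Open a software BPF_OUTPUT perf event to receive the samples. */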
+	attr.sample_type = PERF_SAMPLE_RAW;
+	attr.type = PERF_TYPE_SOFTWARE;
+	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
+	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
+			 -1/*group_fd*/, 0);
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
+		  errno))
+		goto close_prog;
+
+	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
+	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
+		  errno))
+		goto close_prog;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
+		  err, errno))
+		goto close_prog;
+
+	err = perf_event_mmap(pmu_fd);
+	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
+		goto close_prog;
+
+	/* trigger some syscall action */
+	for (i = 0; i < MAX_CNT_RAWTP; i++)
+		nanosleep(&tv, NULL);
+
+	err = perf_event_poller(pmu_fd, get_stack_print_output);
+	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
+		goto close_prog;
+
+	goto close_prog_noerr;
+close_prog:
+	error_cnt++;
+close_prog_noerr:
+	bpf_object__close(obj);
+}
+
 int main(void)
 {
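+	/* Cached once; get_stack_print_output relaxes its checks when JIT is on. */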
+	jit_enabled = is_jit_enabled();
+
 	test_pkt_access();
 	test_xdp();
 	test_xdp_adjust_tail();
@@ -1219,6 +1373,7 @@ int main(void)
 	test_stacktrace_map();
 	test_stacktrace_build_id();
 	test_stacktrace_map_raw_tp();
+	test_get_stack_raw_tp();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;