|
@@ -20,6 +20,7 @@
|
|
|
#include "trace.h"
|
|
|
|
|
|
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
|
|
|
+u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
|
|
|
|
|
|
/**
|
|
|
* trace_call_bpf - invoke BPF program
|
|
@@ -577,6 +578,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
|
return &bpf_perf_event_output_proto;
|
|
|
case BPF_FUNC_get_stackid:
|
|
|
return &bpf_get_stackid_proto;
|
|
|
+ case BPF_FUNC_get_stack:
|
|
|
+ return &bpf_get_stack_proto;
|
|
|
case BPF_FUNC_perf_event_read_value:
|
|
|
return &bpf_perf_event_read_value_proto;
|
|
|
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
|
|
@@ -664,6 +667,25 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
|
|
|
.arg3_type = ARG_ANYTHING,
|
|
|
};
|
|
|
|
|
|
/*
 * bpf_get_stack_tp - bpf_get_stack() wrapper for tracepoint programs.
 *
 * The first word of the tracepoint buffer (tp_buff) is a saved
 * struct pt_regs pointer; recover it and forward the call, together
 * with the program-supplied output buffer, its size, and the flags,
 * to the generic bpf_get_stack() implementation.
 *
 * Return: whatever bpf_get_stack() returns (presumably bytes copied
 * on success or a negative error — confirm against its definition).
 */
BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	/* tp_buff's first field holds the saved regs pointer, hence the
	 * pointer-to-pointer dereference.
	 */
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/* trailing 0: no extra frame-skip beyond what flags encode */
	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}
|
|
|
+
|
|
|
/*
 * Verifier prototype for bpf_get_stack() from tracepoint programs.
 * arg2 is ARG_PTR_TO_UNINIT_MEM: the helper only writes the buffer,
 * so the program need not initialize it first; arg3 allows a zero
 * size (ARG_CONST_SIZE_OR_ZERO) for probing.
 */
static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
|
|
|
+
|
|
|
static const struct bpf_func_proto *
|
|
|
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
|
{
|
|
@@ -672,6 +694,8 @@ tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
|
return &bpf_perf_event_output_proto_tp;
|
|
|
case BPF_FUNC_get_stackid:
|
|
|
return &bpf_get_stackid_proto_tp;
|
|
|
+ case BPF_FUNC_get_stack:
|
|
|
+ return &bpf_get_stack_proto_tp;
|
|
|
default:
|
|
|
return tracing_func_proto(func_id, prog);
|
|
|
}
|
|
@@ -734,6 +758,8 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
|
return &bpf_perf_event_output_proto_tp;
|
|
|
case BPF_FUNC_get_stackid:
|
|
|
return &bpf_get_stackid_proto_tp;
|
|
|
+ case BPF_FUNC_get_stack:
|
|
|
+ return &bpf_get_stack_proto_tp;
|
|
|
case BPF_FUNC_perf_prog_read_value:
|
|
|
return &bpf_perf_prog_read_value_proto;
|
|
|
default:
|
|
@@ -744,7 +770,7 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
|
/*
|
|
|
* bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
|
|
|
* to avoid potential recursive reuse issue when/if tracepoints are added
|
|
|
- * inside bpf_*_event_output and/or bpf_get_stack_id
|
|
|
+ * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
|
|
|
*/
|
|
|
static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
|
|
|
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
|
|
@@ -787,6 +813,26 @@ static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
|
|
|
.arg3_type = ARG_ANYTHING,
|
|
|
};
|
|
|
|
|
|
/*
 * bpf_get_stack_raw_tp - bpf_get_stack() wrapper for raw tracepoints.
 *
 * Raw tracepoint args carry no saved pt_regs, so snapshot the current
 * register state into the per-cpu bpf_raw_tp_regs scratch area (kept
 * separate from bpf_pt_regs to avoid recursive reuse — see the comment
 * at its definition) before delegating to bpf_get_stack().
 */
BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	/* capture the caller's registers at this point in the stack */
	perf_fetch_caller_regs(regs);
	/* trailing 0: no extra frame-skip beyond what flags encode */
	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}
|
|
|
+
|
|
|
/*
 * Verifier prototype for bpf_get_stack() from raw tracepoint programs.
 */
static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	/*
	 * NOTE(review): the tracepoint variant uses ARG_PTR_TO_UNINIT_MEM
	 * for this output buffer; ARG_PTR_TO_MEM here additionally forces
	 * the program to initialize buf before the call — confirm whether
	 * this asymmetry is intentional.
	 */
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
|
|
|
+
|
|
|
static const struct bpf_func_proto *
|
|
|
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
|
{
|
|
@@ -795,6 +841,8 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|
|
return &bpf_perf_event_output_proto_raw_tp;
|
|
|
case BPF_FUNC_get_stackid:
|
|
|
return &bpf_get_stackid_proto_raw_tp;
|
|
|
+ case BPF_FUNC_get_stack:
|
|
|
+ return &bpf_get_stack_proto_raw_tp;
|
|
|
default:
|
|
|
return tracing_func_proto(func_id, prog);
|
|
|
}
|