@@ -665,6 +665,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
@@ -689,14 +690,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-	struct pt_regs hot_regs;
-
+	if (static_key_false(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
+
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
 	if (static_key_false(&perf_swevent_enabled[event_id])) {
-		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs);
-			regs = &hot_regs;
-		}
-		__perf_sw_event(event_id, nr, regs, addr);
+		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+		perf_fetch_caller_regs(regs);
+		___perf_sw_event(event_id, nr, regs, addr);
 	}
 }
 
@@ -712,7 +724,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
 static inline void perf_event_task_sched_out(struct task_struct *prev,
 					     struct task_struct *next)
 {
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
 	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
@@ -823,6 +835,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
 static inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
 static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
+static inline void
 perf_bp_event(struct perf_event *event, void *data) { }
 
 static inline int perf_register_guest_info_callbacks
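
The point of the change is visible in the two inline helpers above: the old perf_sw_event() reserved a full struct pt_regs (hot_regs) on the stack of every caller, inflating the caller's stack frame even when the event was disabled; the new perf_sw_event_sched() keeps that scratch space in per-CPU storage instead, which is safe on the scheduler path because it cannot recurse into itself. Below is a minimal user-space sketch of the same pattern, not the kernel code itself: it uses __thread as a stand-in for per-CPU data and a plain int for the static key, and every name in it (big_regs, scratch_slot, event_enabled, hot_path, slow_path) is hypothetical.

/*
 * Sketch: keep a large scratch structure out of a hot function's stack
 * frame by placing one copy in per-thread (stand-in for per-CPU)
 * storage, and only touch it on the rarely-taken enabled path.
 */
#include <stdio.h>
#include <string.h>

struct big_regs {		/* stand-in for struct pt_regs */
	unsigned long r[32];
};

/* One scratch slot per thread; the kernel uses DECLARE_PER_CPU instead. */
static __thread struct big_regs scratch_slot;

static int event_enabled;	/* stand-in for static_key_false() */

static void slow_path(struct big_regs *regs)
{
	printf("event fired, pc=%lu\n", regs->r[0]);
}

/*
 * The hot function: no struct big_regs lives in this stack frame, so
 * the common (disabled) case costs only a branch.
 */
static inline void hot_path(unsigned long pc)
{
	if (event_enabled) {
		struct big_regs *regs = &scratch_slot;

		memset(regs, 0, sizeof(*regs));
		regs->r[0] = pc;
		slow_path(regs);
	}
}

int main(void)
{
	hot_path(1);		/* disabled: scratch slot never touched */
	event_enabled = 1;
	hot_path(2);		/* enabled: fills the per-thread slot */
	return 0;
}

The no-recursion assumption stated in the kernel comment is what makes the single shared slot per CPU sound: two concurrent users of the same slot on one CPU would clobber each other's registers snapshot, and preemption being disabled inside swevent handlers rules that out for the scheduler path.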