@@ -153,7 +153,6 @@ enum event_type_t {
  */
 struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
-static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 
 static atomic_t nr_mmap_events __read_mostly;
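
The per-CPU perf_branch_stack_events counter removed above goes away because the perf_sched_cb_usages counter kept by this hunk now gates the context-switch path (see the __perf_event_task_sched_in hunk further down). As a rough sketch, assuming the perf_sched_cb_inc()/perf_sched_cb_dec() helpers that accompany that counter, a PMU wanting a context-switch hook accounts for it when the relevant event starts and stops on the CPU; everything named example_* below is illustrative only, not part of this patch:

/* Sketch, not part of the patch: example_* names are made up;
 * perf_sched_cb_inc()/perf_sched_cb_dec() and has_branch_stack()
 * are assumed from the surrounding series. */
static void example_pmu_event_start(struct perf_event *event)
{
        if (has_branch_stack(event))
                perf_sched_cb_inc(event->ctx->pmu);     /* arms perf_pmu_sched_task() */
}

static void example_pmu_event_stop(struct perf_event *event)
{
        if (has_branch_stack(event))
                perf_sched_cb_dec(event->ctx->pmu);
}
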
@@ -1240,9 +1239,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
         if (is_cgroup_event(event))
                 ctx->nr_cgroups++;
 
-        if (has_branch_stack(event))
-                ctx->nr_branch_stack++;
-
         list_add_rcu(&event->event_entry, &ctx->event_list);
         ctx->nr_events++;
         if (event->attr.inherit_stat)
@@ -1409,9 +1405,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
                         cpuctx->cgrp = NULL;
         }
 
-        if (has_branch_stack(event))
-                ctx->nr_branch_stack--;
-
         ctx->nr_events--;
         if (event->attr.inherit_stat)
                 ctx->nr_stat--;
@@ -2808,64 +2801,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         perf_ctx_unlock(cpuctx, ctx);
 }
 
-/*
- * When sampling the branck stack in system-wide, it may be necessary
- * to flush the stack on context switch. This happens when the branch
- * stack does not tag its entries with the pid of the current task.
- * Otherwise it becomes impossible to associate a branch entry with a
- * task. This ambiguity is more likely to appear when the branch stack
- * supports priv level filtering and the user sets it to monitor only
- * at the user level (which could be a useful measurement in system-wide
- * mode). In that case, the risk is high of having a branch stack with
- * branch from multiple tasks. Flushing may mean dropping the existing
- * entries or stashing them somewhere in the PMU specific code layer.
- *
- * This function provides the context switch callback to the lower code
- * layer. It is invoked ONLY when there is at least one system-wide context
- * with at least one active event using taken branch sampling.
- */
-static void perf_branch_stack_sched_in(struct task_struct *prev,
-                                       struct task_struct *task)
-{
-        struct perf_cpu_context *cpuctx;
-        struct pmu *pmu;
-        unsigned long flags;
-
-        /* no need to flush branch stack if not changing task */
-        if (prev == task)
-                return;
-
-        local_irq_save(flags);
-
-        rcu_read_lock();
-
-        list_for_each_entry_rcu(pmu, &pmus, entry) {
-                cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
-                /*
-                 * check if the context has at least one
-                 * event using PERF_SAMPLE_BRANCH_STACK
-                 */
-                if (cpuctx->ctx.nr_branch_stack > 0
-                    && pmu->flush_branch_stack) {
-
-                        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
-
-                        perf_pmu_disable(pmu);
-
-                        pmu->flush_branch_stack();
-
-                        perf_pmu_enable(pmu);
-
-                        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
-                }
-        }
-
-        rcu_read_unlock();
-
-        local_irq_restore(flags);
-}
-
 /*
  * Called from scheduler to add the events of the current task
  * with interrupts disabled.
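
The comment deleted above describes the case that made the flush necessary: a system-wide event sampling taken branches at the user privilege level only, where the hardware entries carry no task tag and so cannot be attributed across a context switch. For reference, a minimal user-space sketch of opening that kind of event (one call per CPU in practice; error handling omitted; the period and config values are illustrative):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_branch_sampling_event(int cpu)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_USER;

        /* pid == -1 with cpu >= 0 requests system-wide monitoring on that CPU */
        return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}
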
@@ -2898,10 +2833,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
         if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
                 perf_cgroup_sched_in(prev, task);
 
-        /* check for system-wide branch_stack events */
-        if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
-                perf_branch_stack_sched_in(prev, task);
-
         if (__this_cpu_read(perf_sched_cb_usages))
                 perf_pmu_sched_task(prev, task, true);
 }
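
With the branch-stack-specific check gone, __perf_event_task_sched_in() only drives the generic perf_pmu_sched_task() path shown above. The flush that perf_branch_stack_sched_in() used to perform is expected to live in the PMU behind its sched_task hook instead; a hedged sketch, assuming the pmu->sched_task(ctx, sched_in) callback from the same series and a made-up flush helper:

/* Sketch only: example_pmu_flush_branch_stack() is hypothetical. */
static void example_pmu_flush_branch_stack(void)
{
        /* drop or stash the hardware branch-stack entries (PMU specific) */
}

static void example_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        if (sched_in)
                example_pmu_flush_branch_stack();
}
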
@@ -3480,10 +3411,6 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
         if (event->parent)
                 return;
 
-        if (has_branch_stack(event)) {
-                if (!(event->attach_state & PERF_ATTACH_TASK))
-                        atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
-        }
         if (is_cgroup_event(event))
                 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
 }
@@ -7139,10 +7066,6 @@ static void account_event_cpu(struct perf_event *event, int cpu)
         if (event->parent)
                 return;
 
-        if (has_branch_stack(event)) {
-                if (!(event->attach_state & PERF_ATTACH_TASK))
-                        atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
-        }
         if (is_cgroup_event(event))
                 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
 }