@@ -10255,7 +10255,7 @@ static void __init perf_event_init_all_cpus(void)
 	}
 }
 
-static void perf_event_init_cpu(int cpu)
+int perf_event_init_cpu(unsigned int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -10268,6 +10268,7 @@ static void perf_event_init_cpu(int cpu)
 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
 	}
 	mutex_unlock(&swhash->hlist_mutex);
+	return 0;
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
@@ -10299,14 +10300,17 @@ static void perf_event_exit_cpu_context(int cpu)
 	}
 	srcu_read_unlock(&pmus_srcu, idx);
 }
+#else
+
+static void perf_event_exit_cpu_context(int cpu) { }
+
+#endif
 
-static void perf_event_exit_cpu(int cpu)
+int perf_event_exit_cpu(unsigned int cpu)
 {
 	perf_event_exit_cpu_context(cpu);
+	return 0;
 }
-#else
-static inline void perf_event_exit_cpu(int cpu) { }
-#endif
 
 static int
 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
@@ -10328,46 +10332,6 @@ static struct notifier_block perf_reboot_notifier = {
 	.priority = INT_MIN,
 };
 
-static int
-perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-
-	case CPU_UP_PREPARE:
-		/*
-		 * This must be done before the CPU comes alive, because the
-		 * moment we can run tasks we can encounter (software) events.
-		 *
-		 * Specifically, someone can have inherited events on kthreadd
-		 * or a pre-existing worker thread that gets re-bound.
-		 */
-		perf_event_init_cpu(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		/*
-		 * This must be done before the CPU dies because after that an
-		 * active event might want to IPI the CPU and that'll not work
-		 * so great for dead CPUs.
-		 *
-		 * XXX smp_call_function_single() return -ENXIO without a warn
-		 * so we could possibly deal with this.
-		 *
-		 * This is safe against new events arriving because
-		 * sys_perf_event_open() serializes against hotplug using
-		 * get_online_cpus().
-		 */
-		perf_event_exit_cpu(cpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
 void __init perf_event_init(void)
 {
 	int ret;
@@ -10380,7 +10344,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
 	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
-	perf_cpu_notifier(perf_cpu_notify);
+	perf_event_init_cpu(smp_processor_id());
 	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();
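
Note: with perf_cpu_notify() removed, perf_event_init_cpu() and
perf_event_exit_cpu() are meant to be driven by the CPU hotplug state
machine instead of a notifier, which is why both now take an unsigned int
and return int. A minimal sketch of such a wiring is below; it is
illustrative only and not part of this diff. The CPUHP_PERF_PREPARE state,
the "perf:prepare" name string, and the use of runtime registration via
cpuhp_setup_state() are assumptions for the sketch; a conversion like this
can equally install the callbacks statically in the state table in
kernel/cpu.c.

	#include <linux/cpuhotplug.h>
	#include <linux/perf_event.h>	/* assumed to carry the new
					 * perf_event_{init,exit}_cpu()
					 * prototypes after this series */

	/*
	 * Sketch only, not part of this patch: attach the converted
	 * callbacks to a PREPARE-stage hotplug state. The startup
	 * callback runs before the CPU comes alive and the teardown
	 * callback before the CPU dies, matching the old CPU_UP_PREPARE
	 * and CPU_DOWN_PREPARE notifier cases.
	 */
	static int __init perf_hotplug_init(void)
	{
		return cpuhp_setup_state(CPUHP_PERF_PREPARE, "perf:prepare",
					 perf_event_init_cpu,
					 perf_event_exit_cpu);
	}

On such a registration, the state machine also invokes the startup
callback once for each already-online CPU, which is why perf_event_init()
above only needs the explicit perf_event_init_cpu(smp_processor_id())
call for the boot CPU during early init, before the state machine is up.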