@@ -389,6 +389,7 @@ static atomic_t nr_switch_events __read_mostly;
 static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
 static struct srcu_struct pmus_srcu;
+static cpumask_var_t perf_online_mask;
 
 /*
  * perf event paranoia level:
@@ -3812,14 +3813,6 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		/*
-		 * We could be clever and allow to attach a event to an
-		 * offline CPU and activate it when the CPU comes up, but
-		 * that's for later.
-		 */
-		if (!cpu_online(cpu))
-			return ERR_PTR(-ENODEV);
-
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		ctx = &cpuctx->ctx;
 		get_ctx(ctx);
@@ -7703,7 +7696,8 @@ static int swevent_hlist_get_cpu(int cpu)
 	int err = 0;
 
 	mutex_lock(&swhash->hlist_mutex);
-	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
+	if (!swevent_hlist_deref(swhash) &&
+	    cpumask_test_cpu(cpu, perf_online_mask)) {
 		struct swevent_hlist *hlist;
 
 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
@@ -7724,7 +7718,7 @@ static int swevent_hlist_get(void)
 {
 	int err, cpu, failed_cpu;
 
-	get_online_cpus();
+	mutex_lock(&pmus_lock);
 	for_each_possible_cpu(cpu) {
 		err = swevent_hlist_get_cpu(cpu);
 		if (err) {
@@ -7732,8 +7726,7 @@ static int swevent_hlist_get(void)
 			goto fail;
 		}
 	}
-	put_online_cpus();
-
+	mutex_unlock(&pmus_lock);
 	return 0;
 fail:
 	for_each_possible_cpu(cpu) {
@@ -7741,8 +7734,7 @@ fail:
 			break;
 		swevent_hlist_put_cpu(cpu);
 	}
-
-	put_online_cpus();
+	mutex_unlock(&pmus_lock);
 	return err;
 }
 
@@ -8920,7 +8912,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
 	pmu->hrtimer_interval_ms = timer;
 
 	/* update all cpuctx for this PMU */
-	get_online_cpus();
+	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct perf_cpu_context *cpuctx;
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
@@ -8929,7 +8921,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
 		cpu_function_call(cpu,
 			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
 	}
-	put_online_cpus();
+	cpus_read_unlock();
 	mutex_unlock(&mux_interval_mutex);
 
 	return count;
@@ -9059,6 +9051,7 @@ skip_type:
 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
 		cpuctx->ctx.pmu = pmu;
+		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
 
 		__perf_mux_hrtimer_init(cpuctx, cpu);
 	}
@@ -9882,12 +9875,10 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_task;
 	}
 
-	get_online_cpus();
-
 	if (task) {
 		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
 		if (err)
-			goto err_cpus;
+			goto err_cred;
 
 		/*
 		 * Reuse ptrace permission checks for now.
@@ -10073,6 +10064,23 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_locked;
 	}
 
+	if (!task) {
+		/*
+		 * Check if the @cpu we're creating an event for is online.
+		 *
+		 * We use the perf_cpu_context::ctx::mutex to serialize against
+		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
+		 */
+		struct perf_cpu_context *cpuctx =
+			container_of(ctx, struct perf_cpu_context, ctx);
+
+		if (!cpuctx->online) {
+			err = -ENODEV;
+			goto err_locked;
+		}
+	}
+
+
 	/*
 	 * Must be under the same ctx::mutex as perf_install_in_context(),
 	 * because we need to serialize with concurrent event creation.
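The hunk above replaces the early cpu_online() test that find_get_context() used to perform: the check now happens later, under ctx->mutex, and a perf_event_open() call targeting an offline CPU fails with -ENODEV. A minimal userspace sketch of how a caller observes this (illustrative only, not part of the patch; perf_event_open() has no glibc wrapper, so it is invoked through syscall(2)):

/*
 * Illustrative only: open a hardware cycles counter pinned to a CPU and
 * report ENODEV, which is what the check above returns when the target
 * CPU is offline at open time.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

static int open_cycles_on_cpu(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid == -1, cpu >= 0: count all tasks on that CPU (needs privilege) */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}

int main(void)
{
	int fd = open_cycles_on_cpu(1);

	if (fd < 0 && errno == ENODEV)
		fprintf(stderr, "CPU 1 is offline\n");
	else if (fd >= 0)
		close(fd);
	return 0;
}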
@@ -10162,8 +10170,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		put_task_struct(task);
 	}
 
-	put_online_cpus();
-
 	mutex_lock(&current->perf_event_mutex);
 	list_add_tail(&event->owner_entry, &current->perf_event_list);
 	mutex_unlock(&current->perf_event_mutex);
@@ -10197,8 +10203,6 @@ err_alloc:
 err_cred:
 	if (task)
 		mutex_unlock(&task->signal->cred_guard_mutex);
-err_cpus:
-	put_online_cpus();
 err_task:
 	if (task)
 		put_task_struct(task);
@@ -10253,6 +10257,21 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_unlock;
 	}
 
+	if (!task) {
+		/*
+		 * Check if the @cpu we're creating an event for is online.
+		 *
+		 * We use the perf_cpu_context::ctx::mutex to serialize against
+		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
+		 */
+		struct perf_cpu_context *cpuctx =
+			container_of(ctx, struct perf_cpu_context, ctx);
+		if (!cpuctx->online) {
+			err = -ENODEV;
+			goto err_unlock;
+		}
+	}
+
 	if (!exclusive_event_installable(event, ctx)) {
 		err = -EBUSY;
 		goto err_unlock;
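The same online check is added for in-kernel users. A hedged sketch of a kernel-side caller (the helper name and its retry hint are made up for illustration) that creates a CPU-bound counter and handles the -ENODEV now returned when the CPU is offline:

/*
 * Illustrative kernel-side sketch, not part of the patch: create a CPU-bound
 * counting event and cope with -ENODEV from the check above.
 */
#include <linux/perf_event.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct perf_event *create_cycles_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};
	struct perf_event *event;

	/* task == NULL: CPU-bound event, which is the case the check covers */
	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(event) && PTR_ERR(event) == -ENODEV)
		pr_debug("cpu %d offline, retry from an online callback\n", cpu);
	return event;
}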
@@ -10920,6 +10939,8 @@ static void __init perf_event_init_all_cpus(void)
 	struct swevent_htable *swhash;
 	int cpu;
 
+	zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
+
 	for_each_possible_cpu(cpu) {
 		swhash = &per_cpu(swevent_htable, cpu);
 		mutex_init(&swhash->hlist_mutex);
@@ -10935,7 +10956,7 @@ static void __init perf_event_init_all_cpus(void)
 	}
 }
 
-int perf_event_init_cpu(unsigned int cpu)
+void perf_swevent_init_cpu(unsigned int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -10948,7 +10969,6 @@ int perf_event_init_cpu(unsigned int cpu)
 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
 	}
 	mutex_unlock(&swhash->hlist_mutex);
-	return 0;
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
@@ -10966,19 +10986,22 @@ static void __perf_event_exit_context(void *__info)
 
 static void perf_event_exit_cpu_context(int cpu)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
 	struct pmu *pmu;
-	int idx;
 
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+	mutex_lock(&pmus_lock);
+	list_for_each_entry(pmu, &pmus, entry) {
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
 
 		mutex_lock(&ctx->mutex);
 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+		cpuctx->online = 0;
 		mutex_unlock(&ctx->mutex);
 	}
-	srcu_read_unlock(&pmus_srcu, idx);
+	cpumask_clear_cpu(cpu, perf_online_mask);
+	mutex_unlock(&pmus_lock);
 }
 #else
 
@@ -10986,6 +11009,29 @@ static void perf_event_exit_cpu_context(int cpu) { }
 
 #endif
 
+int perf_event_init_cpu(unsigned int cpu)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+	struct pmu *pmu;
+
+	perf_swevent_init_cpu(cpu);
+
+	mutex_lock(&pmus_lock);
+	cpumask_set_cpu(cpu, perf_online_mask);
+	list_for_each_entry(pmu, &pmus, entry) {
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
+
+		mutex_lock(&ctx->mutex);
+		cpuctx->online = 1;
+		mutex_unlock(&ctx->mutex);
+	}
+	mutex_unlock(&pmus_lock);
+
+	return 0;
+}
+
 int perf_event_exit_cpu(unsigned int cpu)
 {
 	perf_event_exit_cpu_context(cpu);
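perf_event_init_cpu() and perf_event_exit_cpu() are the CPU-hotplug callbacks: they now also flip perf_online_mask and each per-CPU context's ->online flag under pmus_lock and ctx->mutex, which is what the new checks in perf_event_open() and perf_event_create_kernel_counter() serialize against. For reference, a hedged sketch of how callbacks with this int (*fn)(unsigned int cpu) shape attach to the hotplug state machine; the dynamic registration shown here is only an illustration, since perf itself is wired up through the static cpuhp state table rather than at runtime:

/*
 * Illustrative sketch, not part of the patch: hook online/offline callbacks
 * of the same shape as perf_event_init_cpu()/perf_event_exit_cpu() into the
 * CPU hotplug state machine via a dynamic state.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int example_online(unsigned int cpu)
{
	/* runs as @cpu comes up, like perf_event_init_cpu() */
	return 0;
}

static int example_offline(unsigned int cpu)
{
	/* runs as @cpu goes down, like perf_event_exit_cpu() */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_online, example_offline);
	/* dynamic states return a positive state number on success */
	return ret < 0 ? ret : 0;
}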