@@ -3154,15 +3154,16 @@ static int event_enable_on_exec(struct perf_event *event,
  * Enable all of a task's events that have been marked enable-on-exec.
  * This expects task == current.
  */
-static void perf_event_enable_on_exec(struct perf_event_context *ctx)
+static void perf_event_enable_on_exec(int ctxn)
 {
-	struct perf_event_context *clone_ctx = NULL;
+	struct perf_event_context *ctx, *clone_ctx = NULL;
 	struct perf_event *event;
 	unsigned long flags;
 	int enabled = 0;
 	int ret;
 
 	local_irq_save(flags);
+	ctx = current->perf_event_ctxp[ctxn];
 	if (!ctx || !ctx->nr_events)
 		goto out;
@@ -3205,17 +3206,11 @@ out:
 
 void perf_event_exec(void)
 {
-	struct perf_event_context *ctx;
 	int ctxn;
 
 	rcu_read_lock();
-	for_each_task_context_nr(ctxn) {
-		ctx = current->perf_event_ctxp[ctxn];
-		if (!ctx)
-			continue;
-
-		perf_event_enable_on_exec(ctx);
-	}
+	for_each_task_context_nr(ctxn)
+		perf_event_enable_on_exec(ctxn);
 	rcu_read_unlock();
 }
 
@@ -6493,9 +6488,6 @@ struct swevent_htable {
 
 	/* Recursion avoidance in each contexts */
 	int			recursion[PERF_NR_CONTEXTS];
-
-	/* Keeps track of cpu being initialized/exited */
-	bool			online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -6753,14 +6745,8 @@ static int perf_swevent_add(struct perf_event *event, int flags)
 	hwc->state = !(flags & PERF_EF_START);
 
 	head = find_swevent_head(swhash, event);
-	if (!head) {
-		/*
-		 * We can race with cpu hotplug code. Do not
-		 * WARN if the cpu just got unplugged.
-		 */
-		WARN_ON_ONCE(swhash->online);
+	if (WARN_ON_ONCE(!head))
 		return -EINVAL;
-	}
 
 	hlist_add_head_rcu(&event->hlist_entry, head);
 	perf_event_update_userpage(event);
@@ -6828,7 +6814,6 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
 	int err = 0;
 
 	mutex_lock(&swhash->hlist_mutex);
-
 	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
 		struct swevent_hlist *hlist;
 
@@ -9291,7 +9276,6 @@ static void perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
-	swhash->online = true;
 	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;
 
@@ -9333,14 +9317,7 @@ static void perf_event_exit_cpu_context(int cpu)
 
 static void perf_event_exit_cpu(int cpu)
 {
-	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
-
 	perf_event_exit_cpu_context(cpu);
-
-	mutex_lock(&swhash->hlist_mutex);
-	swhash->online = false;
-	swevent_hlist_release(swhash);
-	mutex_unlock(&swhash->hlist_mutex);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }