@@ -5419,6 +5419,9 @@ struct swevent_htable {
 
 	/* Recursion avoidance in each contexts */
 	int				recursion[PERF_NR_CONTEXTS];
+
+	/* Keeps track of cpu being initialized/exited */
+	bool				online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5665,8 +5668,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
 	hwc->state = !(flags & PERF_EF_START);
 
 	head = find_swevent_head(swhash, event);
-	if (WARN_ON_ONCE(!head))
+	if (!head) {
+		/*
+		 * We can race with cpu hotplug code. Do not
+		 * WARN if the cpu just got unplugged.
+		 */
+		WARN_ON_ONCE(swhash->online);
 		return -EINVAL;
+	}
 
 	hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -7845,6 +7854,7 @@ static void perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = true;
 	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;
 
@@ -7902,6 +7912,7 @@ static void perf_event_exit_cpu(int cpu)
 	perf_event_exit_cpu_context(cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = false;
 	swevent_hlist_release(swhash);
 	mutex_unlock(&swhash->hlist_mutex);
 }
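
For illustration only, below is a minimal userspace sketch of the pattern the patch relies on: an online flag that the hotplug init/exit paths flip under the hash-table mutex, so the add path can tell an expected miss (the CPU just went offline) from a real bug worth a warning. The struct and function names mirror the kernel code above, but the pthread locking, the stand-in hlist pointer, and the main() driver are assumptions made purely for this example, not kernel API.

/*
 * Userspace model of the "online" flag pattern from the patch above.
 * Names mirror kernel/events/core.c; the primitives are stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct swevent_htable_model {
	pthread_mutex_t hlist_mutex;
	void *swevent_hlist;	/* NULL when no hash list is installed */
	bool online;		/* tracks cpu being initialized/exited */
};

static struct swevent_htable_model swhash = {
	.hlist_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Mirrors perf_event_init_cpu(): mark the CPU online before use. */
static void model_init_cpu(void)
{
	pthread_mutex_lock(&swhash.hlist_mutex);
	swhash.online = true;
	swhash.swevent_hlist = &swhash;	/* stand-in for a real hlist */
	pthread_mutex_unlock(&swhash.hlist_mutex);
}

/* Mirrors perf_event_exit_cpu(): mark offline, then drop the hlist. */
static void model_exit_cpu(void)
{
	pthread_mutex_lock(&swhash.hlist_mutex);
	swhash.online = false;
	swhash.swevent_hlist = NULL;
	pthread_mutex_unlock(&swhash.hlist_mutex);
}

/*
 * Mirrors the fixed perf_swevent_add(): a missing hlist only deserves
 * a warning if the CPU is still marked online; an offline CPU makes
 * the miss an expected outcome of the hotplug race.
 */
static int model_swevent_add(void)
{
	if (!swhash.swevent_hlist) {
		if (swhash.online)
			fprintf(stderr, "WARN: hlist missing on online cpu\n");
		return -1;	/* -EINVAL in the kernel */
	}
	return 0;
}

int main(void)
{
	model_init_cpu();
	model_swevent_add();	/* succeeds: hlist present */
	model_exit_cpu();
	model_swevent_add();	/* fails quietly: cpu just went offline */
	return 0;
}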