@@ -315,6 +315,16 @@ void lockdep_assert_cpus_held(void)
 	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
 
+static void lockdep_acquire_cpus_lock(void)
+{
+	rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
+}
+
+static void lockdep_release_cpus_lock(void)
+{
+	rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_);
+}
+
 /*
  * Wait for currently running CPU hotplug operations to complete (if any) and
  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@ -344,6 +354,17 @@ void cpu_hotplug_enable(void)
 	cpu_maps_update_done();
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+
+#else
+
+static void lockdep_acquire_cpus_lock(void)
+{
+}
+
+static void lockdep_release_cpus_lock(void)
+{
+}
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_HOTPLUG_SMT
@@ -616,6 +637,12 @@ static void cpuhp_thread_fun(unsigned int cpu)
 	 */
 	smp_mb();
 
+	/*
+	 * The BP holds the hotplug lock, but we're now running on the AP;
+	 * ensure that anybody asserting the lock is held will actually find
+	 * it so.
+	 */
+	lockdep_acquire_cpus_lock();
 	cpuhp_lock_acquire(bringup);
 
 	if (st->single) {
@@ -661,6 +688,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
|
|
|
}
|
|
|
|
|
|
cpuhp_lock_release(bringup);
|
|
|
+ lockdep_release_cpus_lock();
|
|
|
|
|
|
if (!st->should_run)
|
|
|
complete_ap_thread(st, bringup);
|
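
Side note on why the AP-side annotation matters: the hotplug lock is physically held by the BP (the CPU driving the hotplug operation), but lockdep_assert_cpus_held() can run on the AP thread, where lockdep would otherwise see no held lock. Below is a minimal, hypothetical userspace sketch of the same ownership-handoff pattern using pthreads; struct tracked_lock and every helper name are invented for illustration and are not kernel interfaces. Build with `cc -pthread`.

/*
 * Hypothetical userspace analogy of the patch above.  The "BP" (main
 * thread) really holds the lock; the "AP" (worker thread) only updates
 * the ownership bookkeeping so that a held-assertion running on the AP
 * succeeds, mirroring what lockdep_acquire_cpus_lock() and
 * lockdep_release_cpus_lock() do for lockdep's dep_map.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct tracked_lock {
	pthread_mutex_t mutex;
	pthread_t holder;	/* which thread the checker believes holds it */
	int held;
};

static struct tracked_lock hotplug = { .mutex = PTHREAD_MUTEX_INITIALIZER };

/* Analogue of lockdep_assert_cpus_held(): current thread must be the holder. */
static void assert_held(struct tracked_lock *l)
{
	assert(l->held && pthread_equal(l->holder, pthread_self()));
}

/* Real acquire/release, as done by the BP: takes the mutex, records owner. */
static void lock_acquire_real(struct tracked_lock *l)
{
	pthread_mutex_lock(&l->mutex);
	l->holder = pthread_self();
	l->held = 1;
}

static void lock_release_real(struct tracked_lock *l)
{
	l->held = 0;
	pthread_mutex_unlock(&l->mutex);
}

/* Annotation only, as done by the AP: no mutex traffic, just bookkeeping. */
static void lock_acquire_annotate(struct tracked_lock *l)
{
	l->holder = pthread_self();
}

static void lock_release_annotate(struct tracked_lock *l, pthread_t prev)
{
	l->holder = prev;	/* hand the ownership bookkeeping back */
}

static void *ap_thread(void *arg)
{
	struct tracked_lock *l = arg;
	pthread_t prev = l->holder;

	/* Without this annotation, the assertion below would fire. */
	lock_acquire_annotate(l);
	assert_held(l);
	puts("AP: held-assertion satisfied");
	lock_release_annotate(l, prev);
	return NULL;
}

int main(void)
{
	pthread_t ap;

	lock_acquire_real(&hotplug);	/* BP takes the hotplug lock */
	pthread_create(&ap, NULL, ap_thread, &hotplug);
	pthread_join(&ap, NULL);
	lock_release_real(&hotplug);	/* BP still owns the real lock */
	return 0;
}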