@@ -23,6 +23,8 @@
 #include <linux/tick.h>
 #include <linux/irq.h>
 #include <linux/smpboot.h>
+#include <linux/relay.h>
+#include <linux/slab.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -37,8 +39,9 @@
  * @thread: Pointer to the hotplug thread
  * @should_run: Thread should execute
  * @rollback: Perform a rollback
- * @cb_stat: The state for a single callback (install/uninstall)
- * @cb: Single callback function (install/uninstall)
+ * @single: Single callback invocation
+ * @bringup: Single callback bringup or teardown selector
+ * @cb_state: The state for a single callback (install/uninstall)
  * @result: Result of the operation
  * @done: Signal completion to the issuer of the task
  */
@@ -49,8 +52,10 @@ struct cpuhp_cpu_state {
         struct task_struct *thread;
         bool should_run;
         bool rollback;
+        bool single;
+        bool bringup;
+        struct hlist_node *node;
         enum cpuhp_state cb_state;
-        int (*cb)(unsigned int cpu);
         int result;
         struct completion done;
 #endif
@@ -68,35 +73,103 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
  * @cant_stop: Bringup/teardown can't be stopped at this step
  */
 struct cpuhp_step {
-        const char *name;
-        int (*startup)(unsigned int cpu);
-        int (*teardown)(unsigned int cpu);
-        bool skip_onerr;
-        bool cant_stop;
+        const char *name;
+        union {
+                int (*single)(unsigned int cpu);
+                int (*multi)(unsigned int cpu,
+                             struct hlist_node *node);
+        } startup;
+        union {
+                int (*single)(unsigned int cpu);
+                int (*multi)(unsigned int cpu,
+                             struct hlist_node *node);
+        } teardown;
+        struct hlist_head list;
+        bool skip_onerr;
+        bool cant_stop;
+        bool multi_instance;
 };
 
 static DEFINE_MUTEX(cpuhp_state_mutex);
 static struct cpuhp_step cpuhp_bp_states[];
 static struct cpuhp_step cpuhp_ap_states[];
 
+static bool cpuhp_is_ap_state(enum cpuhp_state state)
+{
+        /*
+         * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
+         * purposes as that state is handled explicitly in cpu_down.
+         */
+        return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
+}
+
+static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
+{
+        struct cpuhp_step *sp;
+
+        sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
+        return sp + state;
+}
+
 /**
  * cpuhp_invoke_callback _ Invoke the callbacks for a given state
  * @cpu: The cpu for which the callback should be invoked
  * @step: The step in the state machine
- * @cb: The callback function to invoke
+ * @bringup: True if the bringup callback should be invoked
  *
- * Called from cpu hotplug and from the state register machinery
+ * Called from cpu hotplug and from the state register machinery.
  */
-static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
-                                 int (*cb)(unsigned int))
+static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
+                                 bool bringup, struct hlist_node *node)
 {
         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
-        int ret = 0;
-
-        if (cb) {
-                trace_cpuhp_enter(cpu, st->target, step, cb);
+        struct cpuhp_step *step = cpuhp_get_step(state);
+        int (*cbm)(unsigned int cpu, struct hlist_node *node);
+        int (*cb)(unsigned int cpu);
+        int ret, cnt;
+
+        if (!step->multi_instance) {
+                cb = bringup ? step->startup.single : step->teardown.single;
+                if (!cb)
+                        return 0;
+                trace_cpuhp_enter(cpu, st->target, state, cb);
                 ret = cb(cpu);
-                trace_cpuhp_exit(cpu, st->state, step, ret);
+                trace_cpuhp_exit(cpu, st->state, state, ret);
+                return ret;
+        }
+        cbm = bringup ? step->startup.multi : step->teardown.multi;
+        if (!cbm)
+                return 0;
+
+        /* Single invocation for instance add/remove */
+        if (node) {
+                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
+                ret = cbm(cpu, node);
+                trace_cpuhp_exit(cpu, st->state, state, ret);
+                return ret;
+        }
+
+        /* State transition. Invoke on all instances */
+        cnt = 0;
+        hlist_for_each(node, &step->list) {
+                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
+                ret = cbm(cpu, node);
+                trace_cpuhp_exit(cpu, st->state, state, ret);
+                if (ret)
+                        goto err;
+                cnt++;
+        }
+        return 0;
+err:
+        /* Rollback the instances if one failed */
+        cbm = !bringup ? step->startup.multi : step->teardown.multi;
+        if (!cbm)
+                return ret;
+
+        hlist_for_each(node, &step->list) {
+                if (!cnt--)
+                        break;
+                cbm(cpu, node);
         }
         return ret;
 }
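The union in struct cpuhp_step plus the instance walk in cpuhp_invoke_callback() above are the core of the multi instance support: a step either carries one callback per direction (startup.single/teardown.single) or, when multi_instance is set, per-instance callbacks plus an hlist of registered instances. As a hedged sketch (not part of this patch; all "foo" names are hypothetical), a client of a multi instance state embeds the hlist_node in its per-device structure and supplies callbacks with the two-argument "multi" prototype:

    #include <linux/list.h>
    #include <linux/printk.h>

    struct foo_device {
            struct hlist_node node;     /* linked into cpuhp_step::list */
            int id;                     /* illustrative per-device state */
    };

    static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
    {
            struct foo_device *foo = hlist_entry(node, struct foo_device, node);

            /* set up this instance's per-cpu resources for @cpu */
            pr_debug("foo%d: cpu %u online\n", foo->id, cpu);
            return 0;
    }

    static int foo_cpu_offline(unsigned int cpu, struct hlist_node *node)
    {
            struct foo_device *foo = hlist_entry(node, struct foo_device, node);

            /* tear down this instance's per-cpu resources for @cpu */
            pr_debug("foo%d: cpu %u offline\n", foo->id, cpu);
            return 0;
    }

On a state transition cpuhp_invoke_callback() walks every registered instance; if one bringup callback fails, the cnt bookkeeping in the err: path rolls back only the instances that had already succeeded.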
@@ -260,10 +333,17 @@ void cpu_hotplug_disable(void)
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 
+static void __cpu_hotplug_enable(void)
+{
+        if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
+                return;
+        cpu_hotplug_disabled--;
+}
+
 void cpu_hotplug_enable(void)
 {
         cpu_maps_update_begin();
-        WARN_ON(--cpu_hotplug_disabled < 0);
+        __cpu_hotplug_enable();
         cpu_maps_update_done();
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
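The __cpu_hotplug_enable() helper above centralizes the unbalanced-enable check: rather than warning after the counter has already been decremented below zero, it warns once and leaves cpu_hotplug_disabled untouched. A minimal, purely illustrative sketch of the pairing it protects (not from this patch):

    static void foo_serialized_work(void)
    {
            cpu_hotplug_disable();      /* blocks cpu hotplug operations */
            /* ... work that must not race with cpu hotplug ... */
            cpu_hotplug_enable();       /* must balance the disable above */
    }

An extra, unbalanced cpu_hotplug_enable() call now triggers the WARN_ONCE instead of driving the count negative.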
@@ -330,12 +410,6 @@ static int notify_online(unsigned int cpu)
         return 0;
 }
 
-static int notify_starting(unsigned int cpu)
-{
-        cpu_notify(CPU_STARTING, cpu);
-        return 0;
-}
-
 static int bringup_wait_for_ap(unsigned int cpu)
 {
         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -349,8 +423,16 @@ static int bringup_cpu(unsigned int cpu)
         struct task_struct *idle = idle_thread_get(cpu);
         int ret;
 
+        /*
+         * Some architectures have to walk the irq descriptors to
+         * setup the vector space for the cpu which comes online.
+         * Prevent irq alloc/free across the bringup.
+         */
+        irq_lock_sparse();
+
         /* Arch-specific enabling code. */
         ret = __cpu_up(cpu, idle);
+        irq_unlock_sparse();
         if (ret) {
                 cpu_notify(CPU_UP_CANCELED, cpu);
                 return ret;
@@ -363,62 +445,55 @@ static int bringup_cpu(unsigned int cpu)
 /*
  * Hotplug state machine related functions
  */
-static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
-                          struct cpuhp_step *steps)
+static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
         for (st->state++; st->state < st->target; st->state++) {
-                struct cpuhp_step *step = steps + st->state;
+                struct cpuhp_step *step = cpuhp_get_step(st->state);
 
                 if (!step->skip_onerr)
-                        cpuhp_invoke_callback(cpu, st->state, step->startup);
+                        cpuhp_invoke_callback(cpu, st->state, true, NULL);
         }
 }
 
 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
-                                struct cpuhp_step *steps, enum cpuhp_state target)
+                                enum cpuhp_state target)
 {
         enum cpuhp_state prev_state = st->state;
         int ret = 0;
 
         for (; st->state > target; st->state--) {
-                struct cpuhp_step *step = steps + st->state;
-
-                ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
+                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
                 if (ret) {
                         st->target = prev_state;
-                        undo_cpu_down(cpu, st, steps);
+                        undo_cpu_down(cpu, st);
                         break;
                 }
         }
         return ret;
 }
 
-static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
-                        struct cpuhp_step *steps)
+static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
         for (st->state--; st->state > st->target; st->state--) {
-                struct cpuhp_step *step = steps + st->state;
+                struct cpuhp_step *step = cpuhp_get_step(st->state);
 
                 if (!step->skip_onerr)
-                        cpuhp_invoke_callback(cpu, st->state, step->teardown);
+                        cpuhp_invoke_callback(cpu, st->state, false, NULL);
         }
 }
 
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
-                              struct cpuhp_step *steps, enum cpuhp_state target)
+                              enum cpuhp_state target)
 {
         enum cpuhp_state prev_state = st->state;
         int ret = 0;
 
         while (st->state < target) {
-                struct cpuhp_step *step;
-
                 st->state++;
-                step = steps + st->state;
-                ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
+                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
                 if (ret) {
                         st->target = prev_state;
-                        undo_cpu_up(cpu, st, steps);
+                        undo_cpu_up(cpu, st);
                         break;
                 }
         }
@@ -447,13 +522,13 @@ static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
         enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
 
-        return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
+        return cpuhp_down_callbacks(cpu, st, target);
 }
 
 /* Execute the online startup callbacks. Used to be CPU_ONLINE */
 static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-        return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
+        return cpuhp_up_callbacks(cpu, st, st->target);
 }
 
 /*
@@ -476,18 +551,20 @@ static void cpuhp_thread_fun(unsigned int cpu)
         st->should_run = false;
 
         /* Single callback invocation for [un]install ? */
-        if (st->cb) {
+        if (st->single) {
                 if (st->cb_state < CPUHP_AP_ONLINE) {
                         local_irq_disable();
-                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
+                        ret = cpuhp_invoke_callback(cpu, st->cb_state,
                                                     st->bringup, st->node);
                         local_irq_enable();
                 } else {
-                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
+                        ret = cpuhp_invoke_callback(cpu, st->cb_state,
                                                     st->bringup, st->node);
                 }
         } else if (st->rollback) {
                 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
 
-                undo_cpu_down(cpu, st, cpuhp_ap_states);
+                undo_cpu_down(cpu, st);
                 /*
                  * This is a momentary workaround to keep the notifier users
                  * happy. Will go away once we got rid of the notifiers.
@@ -509,8 +586,9 @@ static void cpuhp_thread_fun(unsigned int cpu)
 }
 
 /* Invoke a single callback on a remote cpu */
-static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
-                                    int (*cb)(unsigned int))
+static int
+cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
+                         struct hlist_node *node)
 {
         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
@@ -522,10 +600,13 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
          * we invoke the thread function directly.
          */
         if (!st->thread)
-                return cpuhp_invoke_callback(cpu, state, cb);
+                return cpuhp_invoke_callback(cpu, state, bringup, node);
 
         st->cb_state = state;
-        st->cb = cb;
+        st->single = true;
+        st->bringup = bringup;
+        st->node = node;
+
         /*
          * Make sure the above stores are visible before should_run becomes
          * true. Paired with the mb() above in cpuhp_thread_fun()
* true. Paired with the mb() above in cpuhp_thread_fun()
|
|
@@ -541,7 +622,7 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
|
|
|
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
|
|
|
{
|
|
|
st->result = 0;
|
|
|
- st->cb = NULL;
|
|
|
+ st->single = false;
|
|
|
/*
|
|
|
* Make sure the above stores are visible before should_run becomes
|
|
|
* true. Paired with the mb() above in cpuhp_thread_fun()
|
|
@@ -674,12 +755,6 @@ static int notify_down_prepare(unsigned int cpu)
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
-static int notify_dying(unsigned int cpu)
|
|
|
-{
|
|
|
- cpu_notify(CPU_DYING, cpu);
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
/* Take this CPU down. */
|
|
|
static int take_cpu_down(void *_param)
|
|
|
{
|
|
@@ -692,12 +767,16 @@ static int take_cpu_down(void *_param)
|
|
|
if (err < 0)
|
|
|
return err;
|
|
|
|
|
|
+ /*
|
|
|
+ * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
|
|
|
+ * do this step again.
|
|
|
+ */
|
|
|
+ WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
|
|
|
+ st->state--;
|
|
|
/* Invoke the former CPU_DYING callbacks */
|
|
|
- for (; st->state > target; st->state--) {
|
|
|
- struct cpuhp_step *step = cpuhp_ap_states + st->state;
|
|
|
+ for (; st->state > target; st->state--)
|
|
|
+ cpuhp_invoke_callback(cpu, st->state, false, NULL);
|
|
|
|
|
|
- cpuhp_invoke_callback(cpu, st->state, step->teardown);
|
|
|
- }
|
|
|
/* Give up timekeeping duties */
|
|
|
tick_handover_do_timer();
|
|
|
/* Park the stopper thread */
|
|
@@ -734,7 +813,7 @@ static int takedown_cpu(unsigned int cpu)
|
|
|
BUG_ON(cpu_online(cpu));
|
|
|
|
|
|
/*
|
|
|
- * The migration_call() CPU_DYING callback will have removed all
|
|
|
+ * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
|
|
|
* runnable tasks from the cpu, there's only the idle task left now
|
|
|
* that the migration thread is done doing the stop_machine thing.
|
|
|
*
|
|
@@ -787,7 +866,6 @@ void cpuhp_report_idle_dead(void)
|
|
|
#define notify_down_prepare NULL
|
|
|
#define takedown_cpu NULL
|
|
|
#define notify_dead NULL
|
|
|
-#define notify_dying NULL
|
|
|
#endif
|
|
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
@@ -836,7 +914,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
|
|
|
* The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
|
|
|
* to do the further cleanups.
|
|
|
*/
|
|
|
- ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
|
|
|
+ ret = cpuhp_down_callbacks(cpu, st, target);
|
|
|
if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
|
|
|
st->target = prev_state;
|
|
|
st->rollback = true;
|
|
@@ -877,10 +955,9 @@ EXPORT_SYMBOL(cpu_down);
|
|
|
#endif /*CONFIG_HOTPLUG_CPU*/
|
|
|
|
|
|
/**
|
|
|
- * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
|
|
|
+ * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
|
|
|
* @cpu: cpu that just started
|
|
|
*
|
|
|
- * This function calls the cpu_chain notifiers with CPU_STARTING.
|
|
|
* It must be called by the arch code on the new cpu, before the new cpu
|
|
|
* enables interrupts and before the "boot" cpu returns from __cpu_up().
|
|
|
*/
|
|
@@ -891,11 +968,8 @@ void notify_cpu_starting(unsigned int cpu)
|
|
|
|
|
|
rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
|
|
|
while (st->state < target) {
|
|
|
- struct cpuhp_step *step;
|
|
|
-
|
|
|
st->state++;
|
|
|
- step = cpuhp_ap_states + st->state;
|
|
|
- cpuhp_invoke_callback(cpu, st->state, step->startup);
|
|
|
+ cpuhp_invoke_callback(cpu, st->state, true, NULL);
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -980,7 +1054,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
          * responsible for bringing it up to the target state.
          */
         target = min((int)target, CPUHP_BRINGUP_CPU);
-        ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
+        ret = cpuhp_up_callbacks(cpu, st, target);
 out:
         cpu_hotplug_done();
         return ret;
@@ -1083,7 +1157,7 @@ void enable_nonboot_cpus(void)
 
         /* Allow everyone to use the CPU hotplug again */
         cpu_maps_update_begin();
-        WARN_ON(--cpu_hotplug_disabled < 0);
+        __cpu_hotplug_enable();
         if (cpumask_empty(frozen_cpus))
                 goto out;
 
@@ -1172,40 +1246,50 @@ core_initcall(cpu_hotplug_pm_sync_init);
 static struct cpuhp_step cpuhp_bp_states[] = {
         [CPUHP_OFFLINE] = {
                 .name = "offline",
-                .startup = NULL,
-                .teardown = NULL,
+                .startup.single = NULL,
+                .teardown.single = NULL,
         },
 #ifdef CONFIG_SMP
         [CPUHP_CREATE_THREADS]= {
-                .name = "threads:create",
-                .startup = smpboot_create_threads,
-                .teardown = NULL,
+                .name = "threads:prepare",
+                .startup.single = smpboot_create_threads,
+                .teardown.single = NULL,
                 .cant_stop = true,
         },
         [CPUHP_PERF_PREPARE] = {
-                .name = "perf prepare",
-                .startup = perf_event_init_cpu,
-                .teardown = perf_event_exit_cpu,
+                .name = "perf:prepare",
+                .startup.single = perf_event_init_cpu,
+                .teardown.single = perf_event_exit_cpu,
         },
         [CPUHP_WORKQUEUE_PREP] = {
-                .name = "workqueue prepare",
-                .startup = workqueue_prepare_cpu,
-                .teardown = NULL,
+                .name = "workqueue:prepare",
+                .startup.single = workqueue_prepare_cpu,
+                .teardown.single = NULL,
         },
         [CPUHP_HRTIMERS_PREPARE] = {
-                .name = "hrtimers prepare",
-                .startup = hrtimers_prepare_cpu,
-                .teardown = hrtimers_dead_cpu,
+                .name = "hrtimers:prepare",
+                .startup.single = hrtimers_prepare_cpu,
+                .teardown.single = hrtimers_dead_cpu,
         },
         [CPUHP_SMPCFD_PREPARE] = {
-                .name = "SMPCFD prepare",
-                .startup = smpcfd_prepare_cpu,
-                .teardown = smpcfd_dead_cpu,
+                .name = "smpcfd:prepare",
+                .startup.single = smpcfd_prepare_cpu,
+                .teardown.single = smpcfd_dead_cpu,
+        },
+        [CPUHP_RELAY_PREPARE] = {
+                .name = "relay:prepare",
+                .startup.single = relay_prepare_cpu,
+                .teardown.single = NULL,
+        },
+        [CPUHP_SLAB_PREPARE] = {
+                .name = "slab:prepare",
+                .startup.single = slab_prepare_cpu,
+                .teardown.single = slab_dead_cpu,
         },
         [CPUHP_RCUTREE_PREP] = {
-                .name = "RCU-tree prepare",
-                .startup = rcutree_prepare_cpu,
-                .teardown = rcutree_dead_cpu,
+                .name = "RCU/tree:prepare",
+                .startup.single = rcutree_prepare_cpu,
+                .teardown.single = rcutree_dead_cpu,
         },
         /*
          * Preparatory and dead notifiers. Will be replaced once the notifiers
@@ -1213,8 +1297,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
          */
         [CPUHP_NOTIFY_PREPARE] = {
                 .name = "notify:prepare",
-                .startup = notify_prepare,
-                .teardown = notify_dead,
+                .startup.single = notify_prepare,
+                .teardown.single = notify_dead,
                 .skip_onerr = true,
                 .cant_stop = true,
         },
@@ -1224,20 +1308,21 @@ static struct cpuhp_step cpuhp_bp_states[] = {
          * otherwise a RCU stall occurs.
          */
         [CPUHP_TIMERS_DEAD] = {
-                .name = "timers dead",
-                .startup = NULL,
-                .teardown = timers_dead_cpu,
+                .name = "timers:dead",
+                .startup.single = NULL,
+                .teardown.single = timers_dead_cpu,
         },
         /* Kicks the plugged cpu into life */
         [CPUHP_BRINGUP_CPU] = {
                 .name = "cpu:bringup",
-                .startup = bringup_cpu,
-                .teardown = NULL,
+                .startup.single = bringup_cpu,
+                .teardown.single = NULL,
                 .cant_stop = true,
         },
         [CPUHP_AP_SMPCFD_DYING] = {
-                .startup = NULL,
-                .teardown = smpcfd_dying_cpu,
+                .name = "smpcfd:dying",
+                .startup.single = NULL,
+                .teardown.single = smpcfd_dying_cpu,
         },
         /*
          * Handled on controll processor until the plugged processor manages
@@ -1245,8 +1330,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
          */
         [CPUHP_TEARDOWN_CPU] = {
                 .name = "cpu:teardown",
-                .startup = NULL,
-                .teardown = takedown_cpu,
+                .startup.single = NULL,
+                .teardown.single = takedown_cpu,
                 .cant_stop = true,
         },
 #else
@@ -1272,24 +1357,13 @@ static struct cpuhp_step cpuhp_ap_states[] = {
         /* First state is scheduler control. Interrupts are disabled */
         [CPUHP_AP_SCHED_STARTING] = {
                 .name = "sched:starting",
-                .startup = sched_cpu_starting,
-                .teardown = sched_cpu_dying,
+                .startup.single = sched_cpu_starting,
+                .teardown.single = sched_cpu_dying,
         },
         [CPUHP_AP_RCUTREE_DYING] = {
-                .startup = NULL,
-                .teardown = rcutree_dying_cpu,
-        },
-        /*
-         * Low level startup/teardown notifiers. Run with interrupts
-         * disabled. Will be removed once the notifiers are converted to
-         * states.
-         */
-        [CPUHP_AP_NOTIFY_STARTING] = {
-                .name = "notify:starting",
-                .startup = notify_starting,
-                .teardown = notify_dying,
-                .skip_onerr = true,
-                .cant_stop = true,
+                .name = "RCU/tree:dying",
+                .startup.single = NULL,
+                .teardown.single = rcutree_dying_cpu,
         },
         /* Entry state on starting. Interrupts enabled from here on. Transient
          * state for synchronsization */
@@ -1298,24 +1372,24 @@ static struct cpuhp_step cpuhp_ap_states[] = {
         },
         /* Handle smpboot threads park/unpark */
         [CPUHP_AP_SMPBOOT_THREADS] = {
-                .name = "smpboot:threads",
-                .startup = smpboot_unpark_threads,
-                .teardown = NULL,
+                .name = "smpboot/threads:online",
+                .startup.single = smpboot_unpark_threads,
+                .teardown.single = NULL,
         },
         [CPUHP_AP_PERF_ONLINE] = {
-                .name = "perf online",
-                .startup = perf_event_init_cpu,
-                .teardown = perf_event_exit_cpu,
+                .name = "perf:online",
+                .startup.single = perf_event_init_cpu,
+                .teardown.single = perf_event_exit_cpu,
         },
         [CPUHP_AP_WORKQUEUE_ONLINE] = {
-                .name = "workqueue online",
-                .startup = workqueue_online_cpu,
-                .teardown = workqueue_offline_cpu,
+                .name = "workqueue:online",
+                .startup.single = workqueue_online_cpu,
+                .teardown.single = workqueue_offline_cpu,
         },
         [CPUHP_AP_RCUTREE_ONLINE] = {
-                .name = "RCU-tree online",
-                .startup = rcutree_online_cpu,
-                .teardown = rcutree_offline_cpu,
+                .name = "RCU/tree:online",
+                .startup.single = rcutree_online_cpu,
+                .teardown.single = rcutree_offline_cpu,
         },
 
         /*
@@ -1324,8 +1398,8 @@ static struct cpuhp_step cpuhp_ap_states[] = {
          */
         [CPUHP_AP_NOTIFY_ONLINE] = {
                 .name = "notify:online",
-                .startup = notify_online,
-                .teardown = notify_down_prepare,
+                .startup.single = notify_online,
+                .teardown.single = notify_down_prepare,
                 .skip_onerr = true,
         },
 #endif
@@ -1337,16 +1411,16 @@ static struct cpuhp_step cpuhp_ap_states[] = {
         /* Last state is scheduler control setting the cpu active */
         [CPUHP_AP_ACTIVE] = {
                 .name = "sched:active",
-                .startup = sched_cpu_activate,
-                .teardown = sched_cpu_deactivate,
+                .startup.single = sched_cpu_activate,
+                .teardown.single = sched_cpu_deactivate,
         },
 #endif
 
         /* CPU is fully up and running. */
         [CPUHP_ONLINE] = {
                 .name = "online",
-                .startup = NULL,
-                .teardown = NULL,
+                .startup.single = NULL,
+                .teardown.single = NULL,
         },
 };
 
@@ -1358,54 +1432,42 @@ static int cpuhp_cb_check(enum cpuhp_state state)
         return 0;
 }
 
-static bool cpuhp_is_ap_state(enum cpuhp_state state)
-{
-        /*
-         * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
-         * purposes as that state is handled explicitely in cpu_down.
-         */
-        return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
-}
-
-static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
-{
-        struct cpuhp_step *sp;
-
-        sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
-        return sp + state;
-}
-
 static void cpuhp_store_callbacks(enum cpuhp_state state,
                                   const char *name,
                                   int (*startup)(unsigned int cpu),
-                                  int (*teardown)(unsigned int cpu))
+                                  int (*teardown)(unsigned int cpu),
+                                  bool multi_instance)
 {
         /* (Un)Install the callbacks for further cpu hotplug operations */
         struct cpuhp_step *sp;
 
         mutex_lock(&cpuhp_state_mutex);
         sp = cpuhp_get_step(state);
-        sp->startup = startup;
-        sp->teardown = teardown;
+        sp->startup.single = startup;
+        sp->teardown.single = teardown;
         sp->name = name;
+        sp->multi_instance = multi_instance;
+        INIT_HLIST_HEAD(&sp->list);
         mutex_unlock(&cpuhp_state_mutex);
 }
 
 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
 {
-        return cpuhp_get_step(state)->teardown;
+        return cpuhp_get_step(state)->teardown.single;
 }
 
 /*
  * Call the startup/teardown function for a step either on the AP or
  * on the current CPU.
  */
-static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
-                            int (*cb)(unsigned int), bool bringup)
+static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
+                            struct hlist_node *node)
 {
+        struct cpuhp_step *sp = cpuhp_get_step(state);
         int ret;
 
-        if (!cb)
+        if ((bringup && !sp->startup.single) ||
+            (!bringup && !sp->teardown.single))
                 return 0;
         /*
          * The non AP bound callbacks can fail on bringup. On teardown
@@ -1413,11 +1475,11 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
          */
 #ifdef CONFIG_SMP
         if (cpuhp_is_ap_state(state))
-                ret = cpuhp_invoke_ap_callback(cpu, state, cb);
+                ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
         else
-                ret = cpuhp_invoke_callback(cpu, state, cb);
+                ret = cpuhp_invoke_callback(cpu, state, bringup, node);
 #else
-        ret = cpuhp_invoke_callback(cpu, state, cb);
+        ret = cpuhp_invoke_callback(cpu, state, bringup, node);
 #endif
         BUG_ON(ret && !bringup);
         return ret;
@@ -1429,13 +1491,10 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
  * Note: The teardown callbacks for rollback are not allowed to fail!
  */
 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
-                                   int (*teardown)(unsigned int cpu))
+                                   struct hlist_node *node)
 {
         int cpu;
 
-        if (!teardown)
-                return;
-
         /* Roll back the already executed steps on the other cpus */
         for_each_present_cpu(cpu) {
                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -1446,7 +1505,7 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
 
                 /* Did we invoke the startup call on that cpu ? */
                 if (cpustate >= state)
-                        cpuhp_issue_call(cpu, state, teardown, false);
+                        cpuhp_issue_call(cpu, state, false, node);
         }
 }
 
@@ -1473,6 +1532,52 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
         return -ENOSPC;
 }
 
+int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+                               bool invoke)
+{
+        struct cpuhp_step *sp;
+        int cpu;
+        int ret;
+
+        sp = cpuhp_get_step(state);
+        if (sp->multi_instance == false)
+                return -EINVAL;
+
+        get_online_cpus();
+
+        if (!invoke || !sp->startup.multi)
+                goto add_node;
+
+        /*
+         * Try to call the startup callback for each present cpu
+         * depending on the hotplug state of the cpu.
+         */
+        for_each_present_cpu(cpu) {
+                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+                int cpustate = st->state;
+
+                if (cpustate < state)
+                        continue;
+
+                ret = cpuhp_issue_call(cpu, state, true, node);
+                if (ret) {
+                        if (sp->teardown.multi)
+                                cpuhp_rollback_install(cpu, state, node);
+                        goto err;
+                }
+        }
+add_node:
+        ret = 0;
+        mutex_lock(&cpuhp_state_mutex);
+        hlist_add_head(node, &sp->list);
+        mutex_unlock(&cpuhp_state_mutex);
+
+err:
+        put_online_cpus();
+        return ret;
+}
+EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+
 /**
  * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
  * @state: The state to setup
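__cpuhp_state_add_instance() above is the registration side of multi instance states: it invokes the startup.multi callback for the new instance on every cpu that has already reached the state, rolls back on failure, and only links the node into the step's list on success. A hedged usage sketch continuing the hypothetical foo example; the convenience wrappers belong to the header side of this series (include/linux/cpuhotplug.h) and are not shown in this diff, so the sketch calls the double-underscore functions directly and mirrors the function-pointer cast those wrappers are expected to use:

    static enum cpuhp_state foo_hp_state;

    static int __init foo_init(void)
    {
            int ret;

            /* Reserve a dynamic state; no instances exist yet, so don't invoke */
            ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online", false,
                                      (int (*)(unsigned int))foo_cpu_online,
                                      (int (*)(unsigned int))foo_cpu_offline,
                                      true /* multi_instance */);
            if (ret < 0)
                    return ret;
            foo_hp_state = ret;         /* dynamically allocated state number */
            return 0;
    }

    static int foo_probe_one(struct foo_device *foo)
    {
            /* Invokes foo_cpu_online(cpu, &foo->node) on each qualifying cpu */
            return __cpuhp_state_add_instance(foo_hp_state, &foo->node, true);
    }

The cast works only because startup.single and startup.multi share storage in the union, and cpuhp_invoke_callback() selects the multi prototype whenever the step has multi_instance set.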
@@ -1486,7 +1591,8 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
 int __cpuhp_setup_state(enum cpuhp_state state,
                         const char *name, bool invoke,
                         int (*startup)(unsigned int cpu),
-                        int (*teardown)(unsigned int cpu))
+                        int (*teardown)(unsigned int cpu),
+                        bool multi_instance)
 {
         int cpu, ret = 0;
         int dyn_state = 0;
@@ -1505,7 +1611,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
                 state = ret;
         }
 
-        cpuhp_store_callbacks(state, name, startup, teardown);
+        cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
 
         if (!invoke || !startup)
                 goto out;
@@ -1521,10 +1627,11 @@ int __cpuhp_setup_state(enum cpuhp_state state,
                 if (cpustate < state)
                         continue;
 
-                ret = cpuhp_issue_call(cpu, state, startup, true);
+                ret = cpuhp_issue_call(cpu, state, true, NULL);
                 if (ret) {
-                        cpuhp_rollback_install(cpu, state, teardown);
-                        cpuhp_store_callbacks(state, NULL, NULL, NULL);
+                        if (teardown)
+                                cpuhp_rollback_install(cpu, state, NULL);
+                        cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
                         goto out;
                 }
         }
@@ -1536,6 +1643,42 @@ out:
 }
 EXPORT_SYMBOL(__cpuhp_setup_state);
 
+int __cpuhp_state_remove_instance(enum cpuhp_state state,
+                                  struct hlist_node *node, bool invoke)
+{
+        struct cpuhp_step *sp = cpuhp_get_step(state);
+        int cpu;
+
+        BUG_ON(cpuhp_cb_check(state));
+
+        if (!sp->multi_instance)
+                return -EINVAL;
+
+        get_online_cpus();
+        if (!invoke || !cpuhp_get_teardown_cb(state))
+                goto remove;
+        /*
+         * Call the teardown callback for each present cpu depending
+         * on the hotplug state of the cpu. This function is not
+         * allowed to fail currently!
+         */
+        for_each_present_cpu(cpu) {
+                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+                int cpustate = st->state;
+
+                if (cpustate >= state)
+                        cpuhp_issue_call(cpu, state, false, node);
+        }
+
+remove:
+        mutex_lock(&cpuhp_state_mutex);
+        hlist_del(node);
+        mutex_unlock(&cpuhp_state_mutex);
+        put_online_cpus();
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
 /**
  * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
  * @state: The state to remove
@@ -1547,14 +1690,21 @@ EXPORT_SYMBOL(__cpuhp_setup_state);
  */
 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 {
-        int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
+        struct cpuhp_step *sp = cpuhp_get_step(state);
         int cpu;
 
         BUG_ON(cpuhp_cb_check(state));
 
         get_online_cpus();
 
-        if (!invoke || !teardown)
+        if (sp->multi_instance) {
+                WARN(!hlist_empty(&sp->list),
+                     "Error: Removing state %d which has instances left.\n",
+                     state);
+                goto remove;
+        }
+
+        if (!invoke || !cpuhp_get_teardown_cb(state))
                 goto remove;
 
         /*
@@ -1567,10 +1717,10 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
                 int cpustate = st->state;
 
                 if (cpustate >= state)
-                        cpuhp_issue_call(cpu, state, teardown, false);
+                        cpuhp_issue_call(cpu, state, false, NULL);
         }
 remove:
-        cpuhp_store_callbacks(state, NULL, NULL, NULL);
+        cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
         put_online_cpus();
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
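A matching teardown sketch for the hypothetical foo example: every instance has to be removed before the state itself, otherwise the WARN in __cpuhp_remove_state() above fires for a multi instance state that still has instances queued:

    static void foo_remove_one(struct foo_device *foo)
    {
            /* Invokes foo_cpu_offline(cpu, &foo->node) on each qualifying cpu */
            __cpuhp_state_remove_instance(foo_hp_state, &foo->node, true);
    }

    static void __exit foo_exit(void)
    {
            /* All instances are gone, so the state slot can be released */
            __cpuhp_remove_state(foo_hp_state, false);
    }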