@@ -3556,24 +3556,21 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
    not required for correctness. So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
-			void *hcpu)
+static int kswapd_cpu_online(unsigned int cpu)
 {
 	int nid;
 
-	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
-		for_each_node_state(nid, N_MEMORY) {
-			pg_data_t *pgdat = NODE_DATA(nid);
-			const struct cpumask *mask;
+	for_each_node_state(nid, N_MEMORY) {
+		pg_data_t *pgdat = NODE_DATA(nid);
+		const struct cpumask *mask;
 
-			mask = cpumask_of_node(pgdat->node_id);
+		mask = cpumask_of_node(pgdat->node_id);
 
-			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
-				/* One of our CPUs online: restore mask */
-				set_cpus_allowed_ptr(pgdat->kswapd, mask);
-		}
+		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+			/* One of our CPUs online: restore mask */
+			set_cpus_allowed_ptr(pgdat->kswapd, mask);
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
 /*
@@ -3615,12 +3612,15 @@ void kswapd_stop(int nid)
 
 static int __init kswapd_init(void)
 {
-	int nid;
+	int nid, ret;
 
 	swap_setup();
 	for_each_node_state(nid, N_MEMORY)
 		kswapd_run(nid);
-	hotcpu_notifier(cpu_callback, 0);
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					"mm/vmscan:online", kswapd_cpu_online,
+					NULL);
+	WARN_ON(ret < 0);
 	return 0;
 }
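
For reference, the registration pattern the patch switches to can be exercised on its own. The sketch below is not part of the patch: the "example/demo:online" state name and all demo_* identifiers are made up for illustration. It registers an online-only callback against a dynamically allocated hotplug state, as kswapd_init() now does, and additionally tears the state down on module exit:

#include <linux/module.h>
#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static int demo_state;

/* Runs for every CPU that comes online after registration. */
static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu %u is online\n", cpu);
	return 0;	/* a negative value would roll the bring-up back */
}

static int __init demo_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN requests a dynamically allocated state;
	 * the _nocalls variant skips invoking the callback for CPUs
	 * that are already online. On success the allocated state
	 * number (> 0) is returned and must be kept for removal.
	 */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"example/demo:online",
					demo_cpu_online, NULL);
	if (ret < 0)
		return ret;
	demo_state = ret;
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state_nocalls(demo_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

kswapd_init(), by contrast, deliberately discards the returned state number: kswapd threads are never unregistered, so a bare WARN_ON(ret < 0) is all the error handling it needs.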