@@ -92,6 +92,10 @@ EXPORT_PER_CPU_SYMBOL(_numa_mem_);
 int _node_numa_mem_[MAX_NUMNODES];
 #endif
 
+/* work_structs for global per-cpu drains */
+DEFINE_MUTEX(pcpu_drain_mutex);
+DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+
 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 volatile unsigned long latent_entropy __latent_entropy;
 EXPORT_SYMBOL(latent_entropy);
@@ -2360,7 +2364,6 @@ static void drain_local_pages_wq(struct work_struct *work)
  */
 void drain_all_pages(struct zone *zone)
 {
-	struct work_struct __percpu *works;
 	int cpu;
 
 	/*
@@ -2373,7 +2376,16 @@ void drain_all_pages(struct zone *zone)
 	if (current->flags & PF_WQ_WORKER)
 		return;
 
-	works = alloc_percpu_gfp(struct work_struct, GFP_ATOMIC);
+	/*
+	 * Do not drain if one is already in progress unless it's specific to
+	 * a zone. Such callers are primarily CMA and memory hotplug and need
+	 * the drain to be complete when the call returns.
+	 */
+	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
+		if (!zone)
+			return;
+		mutex_lock(&pcpu_drain_mutex);
+	}
 
 	/*
 	 * We don't care about racing with CPU hotplug event
@@ -2406,23 +2418,15 @@ void drain_all_pages(struct zone *zone)
 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
 
-	if (works) {
-		for_each_cpu(cpu, &cpus_with_pcps) {
-			struct work_struct *work = per_cpu_ptr(works, cpu);
-			INIT_WORK(work, drain_local_pages_wq);
-			schedule_work_on(cpu, work);
-		}
-		for_each_cpu(cpu, &cpus_with_pcps)
-			flush_work(per_cpu_ptr(works, cpu));
-	} else {
-		for_each_cpu(cpu, &cpus_with_pcps) {
-			struct work_struct work;
-
-			INIT_WORK(&work, drain_local_pages_wq);
-			schedule_work_on(cpu, &work);
-			flush_work(&work);
-		}
+	for_each_cpu(cpu, &cpus_with_pcps) {
+		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
+		INIT_WORK(work, drain_local_pages_wq);
+		schedule_work_on(cpu, work);
 	}
+	for_each_cpu(cpu, &cpus_with_pcps)
+		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+
+	mutex_unlock(&pcpu_drain_mutex);
 }
 
 #ifdef CONFIG_HIBERNATION
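
Two notes on the pattern above, for readers applying it elsewhere. First, mutex_trylock() lets opportunistic callers return immediately when another drain is already in flight, while zone-specific callers (primarily CMA and memory hotplug) fall back to a blocking mutex_lock() because they need the drain complete on return. Second, the work items are scheduled on every CPU in cpus_with_pcps before any of them is flushed, so the per-cpu drains run in parallel rather than back to back. Below is a minimal userspace sketch of the trylock-or-block idiom using POSIX threads; drain_mutex, must_complete, and do_drain are illustrative names for this sketch, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for draining the per-cpu pagesets. */
static void do_drain(void)
{
	puts("draining");
}

/*
 * Trylock-or-block: if a drain is already running, opportunistic
 * callers (must_complete == false) back off, while callers that
 * need the drain finished before returning wait for the mutex.
 */
static void drain_all(bool must_complete)
{
	if (pthread_mutex_trylock(&drain_mutex) != 0) {
		if (!must_complete)
			return;	/* someone else is already draining */
		pthread_mutex_lock(&drain_mutex);
	}
	do_drain();
	pthread_mutex_unlock(&drain_mutex);
}

int main(void)
{
	drain_all(false);	/* may be skipped if contended */
	drain_all(true);	/* always completes a drain */
	return 0;
}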