|
@@ -1396,10 +1396,15 @@ static void vmstat_update(struct work_struct *w)
|
|
|
* Counters were updated so we expect more updates
|
|
|
* to occur in the future. Keep on running the
|
|
|
* update worker thread.
|
|
|
+ * If we were marked on cpu_stat_off, clear the flag
|
|
|
+ * so that vmstat_shepherd doesn't schedule us again.
|
|
|
*/
|
|
|
- queue_delayed_work_on(smp_processor_id(), vmstat_wq,
|
|
|
- this_cpu_ptr(&vmstat_work),
|
|
|
- round_jiffies_relative(sysctl_stat_interval));
|
|
|
+ if (!cpumask_test_and_clear_cpu(smp_processor_id(),
|
|
|
+ cpu_stat_off)) {
|
|
|
+ queue_delayed_work_on(smp_processor_id(), vmstat_wq,
|
|
|
+ this_cpu_ptr(&vmstat_work),
|
|
|
+ round_jiffies_relative(sysctl_stat_interval));
|
|
|
+ }
|
|
|
} else {
|
|
|
/*
|
|
|
* We did not update any counters so the app may be in
|
|
@@ -1417,18 +1422,6 @@ static void vmstat_update(struct work_struct *w)
|
|
|
* until the diffs stay at zero. The function is used by NOHZ and can only be
|
|
|
* invoked when tick processing is not active.
|
|
|
*/
|
|
|
-void quiet_vmstat(void)
|
|
|
-{
|
|
|
- if (system_state != SYSTEM_RUNNING)
|
|
|
- return;
|
|
|
-
|
|
|
- do {
|
|
|
- if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
|
|
|
- cancel_delayed_work(this_cpu_ptr(&vmstat_work));
|
|
|
-
|
|
|
- } while (refresh_cpu_vm_stats(false));
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* Check if the diffs for a certain cpu indicate that
|
|
|
* an update is needed.
|
|
@@ -1452,6 +1445,30 @@ static bool need_update(int cpu)
|
|
|
return false;
|
|
|
}
|
|
|
|
|
|
+void quiet_vmstat(void)
|
|
|
+{
|
|
|
+ if (system_state != SYSTEM_RUNNING)
|
|
|
+ return;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If we are already in hands of the shepherd then there
|
|
|
+ * is nothing for us to do here.
|
|
|
+ */
|
|
|
+ if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
|
|
|
+ return;
|
|
|
+
|
|
|
+ if (!need_update(smp_processor_id()))
|
|
|
+ return;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Just refresh counters and do not care about the pending delayed
|
|
|
+ * vmstat_update. It doesn't fire often enough to matter and canceling
|
|
|
+ * it would be too expensive from this path.
|
|
|
+ * vmstat_shepherd will take care of that for us.
|
|
|
+ */
|
|
|
+ refresh_cpu_vm_stats(false);
|
|
|
+}
|
|
|
+
|
|
|
|
|
|
/*
|
|
|
* Shepherd worker thread that checks the
|
|
@@ -1469,18 +1486,25 @@ static void vmstat_shepherd(struct work_struct *w)
|
|
|
|
|
|
get_online_cpus();
|
|
|
/* Check processors whose vmstat worker threads have been disabled */
|
|
|
- for_each_cpu(cpu, cpu_stat_off)
|
|
|
- if (need_update(cpu) &&
|
|
|
- cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
|
|
|
-
|
|
|
- queue_delayed_work_on(cpu, vmstat_wq,
|
|
|
- &per_cpu(vmstat_work, cpu), 0);
|
|
|
+ for_each_cpu(cpu, cpu_stat_off) {
|
|
|
+ struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
|
|
|
|
|
|
+ if (need_update(cpu)) {
|
|
|
+ if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
|
|
|
+ queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
|
|
|
+ } else {
|
|
|
+ /*
|
|
|
+ * Cancel the work if quiet_vmstat has put this
|
|
|
+ * cpu on cpu_stat_off because the work item might
|
|
|
+ * still be scheduled
|
|
|
+ */
|
|
|
+ cancel_delayed_work(dw);
|
|
|
+ }
|
|
|
+ }
|
|
|
put_online_cpus();
|
|
|
|
|
|
schedule_delayed_work(&shepherd,
|
|
|
round_jiffies_relative(sysctl_stat_interval));
|
|
|
-
|
|
|
}
|
|
|
|
|
|
static void __init start_shepherd_timer(void)
|