/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
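 *
 * A quick worked example (the numbers are illustrative): over a 2s
 * window in which one or more tasks were delayed for a total of
 * 0.2s, %SOME = 0.2 / 2 = 10%; if during 0.1s of that window nothing
 * was able to run at all, %FULL = 0.1 / 2 = 5%.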
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	SOME = min(nr_delayed_tasks / threads, 1)
 *	FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	SOME = min(1 / 256, 1) = 0.4%
 *	FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	SOME = min(1 / 4, 1) = 25%
 *	FULL = (4 - min(3, 4)) / 4 = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	%SOME = tSOME / period
 *	%FULL = tFULL / period
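 *
 * To sketch why the non-idle weighting matters (illustrative numbers,
 * not from the text above): with two CPUs, where CPU0 was non-idle
 * for the full 2s period with 0.5s of SOME time and CPU1 was
 * completely idle,
 *
 *	tSOME = (0.5s * 2s + 0s * 0s) / 2s = 0.5s
 *
 * i.e. the idle CPU contributes no weight and does not dilute the
 * pressure that the active CPU actually observed.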
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

bool psi_disabled __read_mostly;
core_param(psi_disabled, psi_disabled, bool, 0644);
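
/*
 * Note: core_param() registers an unprefixed kernel command line
 * parameter, so booting with "psi_disabled=1" should disable the
 * accounting entirely (the parameter name matches the variable
 * registered above).
 */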

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
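
/*
 * Where those constants come from (a back-of-the-envelope check, not
 * part of the original text): the fixed-point base is FIXED_1 = 2048
 * from <linux/sched/loadavg.h>, so each decay factor is
 * FIXED_1 / e^(2s/W) for averaging window W:
 *
 *	2048 / e^(2/10)  ~= 2048 * 0.8187 ~= 1677
 *	2048 / e^(2/60)  ~= 2048 * 0.9672 ~= 1981
 *	2048 / e^(2/300) ~= 2048 * 0.9934 ~= 2034
 */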

/* Sampling period in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
static struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_update_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->next_update = sched_clock() + psi_period;
	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
	mutex_init(&group->stat_lock);
}

void __init psi_init(void)
{
	if (psi_disabled)
		return;

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}
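
/*
 * Map the per-CPU task counts to the SOME/FULL states defined in the
 * model above. As a worked example (hypothetical counts): with one
 * task in iowait, none in memstall and two runnable, i.e.
 * tasks = {1, 0, 2}, PSI_IO_SOME and PSI_CPU_SOME are true, while
 * PSI_IO_FULL and both memory states are false.
 */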
static bool test_state(unsigned int *tasks, enum psi_states state)
{
	switch (state) {
	case PSI_IO_SOME:
		return tasks[NR_IOWAIT];
	case PSI_IO_FULL:
		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
	case PSI_MEM_SOME:
		return tasks[NR_MEMSTALL];
	case PSI_MEM_FULL:
		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
	case PSI_CPU_SOME:
		return tasks[NR_RUNNING] > 1;
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}

static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	unsigned int tasks[NR_PSI_TASK_COUNTS];
	u64 now, state_start;
	unsigned int seq;
	int s;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (test_state(tasks, s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[s];
		groupc->times_prev[s] = times[s];

		times[s] = delta;
	}
}
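
/*
 * Fold one period's pressure sample into the decaying 10s/60s/300s
 * averages, loadavg-style. A worked example (my arithmetic, based on
 * calc_load() from <linux/sched/loadavg.h>, not from the original
 * text): starting from avg10 = 0 with a fully stalled 2s period,
 *
 *	pct = 100 * FIXED_1
 *	avg = calc_load(0, EXP_10s, pct)
 *	    = pct * (2048 - 1677) / 2048 = 37100
 *
 * which is ~18.1% once divided by FIXED_1; under sustained 100%
 * pressure the average converges toward 100.
 */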
static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}

static bool update_stats(struct psi_group *group)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long missed_periods = 0;
	unsigned long nonidle_total = 0;
	u64 now, expires, period;
	int cpu;
	int s;

	mutex_lock(&group->stat_lock);

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;

		get_recent_times(group, cpu, times);

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));

	/* avgX= */
	now = sched_clock();
	expires = group->next_update;
	if (now < expires)
		goto out;
	if (now - expires > psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	group->next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->last_update + (missed_periods * psi_period));
	group->last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[s] - group->total_prev[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report nonsensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->total_prev[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}
out:
	mutex_unlock(&group->stat_lock);

	return nonidle_total;
}

static void psi_update_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	bool nonidle;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, clock_work);

	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */

	nonidle = update_stats(group);

	if (nonidle) {
		unsigned long delay = 0;
		u64 now;

		now = sched_clock();
		if (group->next_update > now)
			delay = nsecs_to_jiffies(group->next_update - now) + 1;
		schedule_delayed_work(dwork, delay);
	}
}
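
/*
 * A note on the memstall tick sampling below: the tick path accounts
 * at most one jiffy of FULL time per tick. With HZ=1000 (an assumed
 * config, purely for illustration), jiffies_to_nsecs(1) is 1000000,
 * so a reclaiming CPU accrues up to 1ms of PSI_MEM_FULL time per
 * scheduler tick, clamped by the time since the last state change.
 */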
static void record_times(struct psi_group_cpu *groupc, int cpu,
			 bool memstall_tick)
{
	u32 delta;
	u64 now;

	now = cpu_clock(cpu);
	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (test_state(groupc->tasks, PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (test_state(groupc->tasks, PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (test_state(groupc->tasks, PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (test_state(groupc->tasks, PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
		else if (memstall_tick) {
			u32 sample;
			/*
			 * Since we care about lost potential, a
			 * memstall is FULL when there are no other
			 * working tasks, but also when the CPU is
			 * actively reclaiming and nothing productive
			 * could run even if it were runnable.
			 *
			 * When the timer tick sees a reclaiming CPU,
			 * regardless of runnable tasks, sample a FULL
			 * tick (or less if it hasn't been a full tick
			 * since the last state change).
			 */
			sample = min(delta, (u32)jiffies_to_nsecs(1));
			groupc->times[PSI_MEM_FULL] += sample;
		}
	}

	if (test_state(groupc->tasks, PSI_CPU_SOME))
		groupc->times[PSI_CPU_SOME] += delta;

	if (test_state(groupc->tasks, PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, cpu, false);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;

		if (groupc->tasks[t] == 0 && !psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					clear, set);
			psi_bug = 1;
		}

		groupc->tasks[t]--;
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	write_seqcount_end(&groupc->seq);

	if (!delayed_work_pending(&group->clock_work))
		schedule_delayed_work(&group->clock_work, PSI_FREQ);
}
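
/*
 * Example of how the scheduler side is expected to drive this
 * (an assumed call pattern, not shown in this file): when a runnable
 * task blocks on IO, the state flip would look like
 *
 *	psi_task_change(p, TSK_RUNNING, TSK_IOWAIT);
 *
 * i.e. @clear and @set are bitmasks over the TSK_* task states, and
 * the loops above walk them bit by bit to adjust the per-CPU counts.
 */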
void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);

	if (!task->pid)
		return;

	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, cpu,
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;

	psi_group_change(&psi_system, cpu, clear, set);
}

void psi_memstall_tick(struct task_struct *task, int cpu)
{
	struct psi_group_cpu *groupc;

	groupc = per_cpu_ptr(psi_system.pcpu, cpu);
	write_seqcount_begin(&groupc->seq);
	record_times(groupc, cpu, true);
	write_seqcount_end(&groupc->seq);
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (psi_disabled)
		return;

	*flags = current->flags & PF_MEMSTALL;
	if (*flags)
		return;
	/*
	 * PF_MEMSTALL setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags |= PF_MEMSTALL;
	psi_task_change(current, 0, TSK_MEMSTALL);

	rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (psi_disabled)
		return;

	if (*flags)
		return;
	/*
	 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags &= ~PF_MEMSTALL;
	psi_task_change(current, TSK_MEMSTALL, 0);

	rq_unlock_irq(rq, &rf);
}
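
/*
 * Typical usage from a reclaim or refault path (an illustrative
 * sketch; the actual call sites live elsewhere, e.g. in mm/):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... perform reclaim / wait for the refaulting page ...
 *	psi_memstall_leave(&pflags);
 *
 * The @flags cookie lets nested sections enter and leave without
 * double-accounting: only the outermost pair flips PF_MEMSTALL, as
 * the early returns above show.
 */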

static int psi_show(struct seq_file *m, struct psi_group *group,
		    enum psi_res res)
{
	int full;

	if (psi_disabled)
		return -EOPNOTSUPP;

	update_stats(group);

	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
		unsigned long avg[3];
		u64 total;
		int w;

		for (w = 0; w < 3; w++)
			avg[w] = group->avg[res * 2 + full][w];
		total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}
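
/*
 * Given the format string above, reading e.g. /proc/pressure/memory
 * yields two lines like these (values invented for illustration):
 *
 *	some avg10=2.04 avg60=0.75 avg300=0.40 total=157656722
 *	full avg10=1.02 avg60=0.31 avg300=0.12 total=85463612
 *
 * where total is the cumulative stall time in microseconds. The CPU
 * file only prints the "some" line, per the loop bound above.
 */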

static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}

static const struct file_operations psi_io_fops = {
	.open           = psi_io_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static const struct file_operations psi_memory_fops = {
	.open           = psi_memory_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static const struct file_operations psi_cpu_fops = {
	.open           = psi_cpu_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static int __init psi_proc_init(void)
{
	proc_mkdir("pressure", NULL);
	proc_create("pressure/io", 0, NULL, &psi_io_fops);
	proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
	proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
	return 0;
}
module_init(psi_proc_init);