|
@@ -13,7 +13,7 @@ static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * cgroup_rstat_cpu_updated - keep track of updated rstat_cpu
|
|
|
+ * cgroup_rstat_updated - keep track of updated rstat_cpu
|
|
|
* @cgrp: target cgroup
|
|
|
* @cpu: cpu on which rstat_cpu was updated
|
|
|
*
|
|
@@ -21,7 +21,7 @@ static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
|
|
|
* rstat_cpu->updated_children list. See the comment on top of
|
|
|
* cgroup_rstat_cpu definition for details.
|
|
|
*/
|
|
|
-static void cgroup_rstat_cpu_updated(struct cgroup *cgrp, int cpu)
|
|
|
+void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
|
|
|
{
|
|
|
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
|
|
|
struct cgroup *parent;
|
|
@@ -59,6 +59,7 @@ static void cgroup_rstat_cpu_updated(struct cgroup *cgrp, int cpu)
|
|
|
|
|
|
raw_spin_unlock_irqrestore(cpu_lock, flags);
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(cgroup_rstat_updated);
|
|
|
|
|
|
/**
|
|
|
* cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
|
|
@@ -167,6 +168,29 @@ void cgroup_rstat_flush(struct cgroup *cgrp)
|
|
|
mutex_unlock(&cgroup_rstat_mutex);
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
|
|
|
+ * @cgrp: target cgroup
|
|
|
+ *
|
|
|
+ * Flush stats in @cgrp's subtree and prevent further flushes. Must be
|
|
|
+ * paired with cgroup_rstat_flush_release().
|
|
|
+ */
|
|
|
+void cgroup_rstat_flush_hold(struct cgroup *cgrp)
|
|
|
+ __acquires(&cgroup_rstat_mutex)
|
|
|
+{
|
|
|
+ mutex_lock(&cgroup_rstat_mutex);
|
|
|
+ cgroup_rstat_flush_locked(cgrp);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
|
|
|
+ */
|
|
|
+void cgroup_rstat_flush_release(void)
|
|
|
+ __releases(&cgroup_rstat_mutex)
|
|
|
+{
|
|
|
+ mutex_unlock(&cgroup_rstat_mutex);
|
|
|
+}
|
|
|
+
|
|
|
int cgroup_rstat_init(struct cgroup *cgrp)
|
|
|
{
|
|
|
int cpu;
|
|
@@ -239,15 +263,13 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
|
|
|
struct cgroup_base_stat delta;
|
|
|
unsigned seq;
|
|
|
|
|
|
- lockdep_assert_held(&cgroup_rstat_mutex);
|
|
|
-
|
|
|
/* fetch the current per-cpu values */
|
|
|
do {
|
|
|
seq = __u64_stats_fetch_begin(&rstatc->bsync);
|
|
|
cputime = rstatc->bstat.cputime;
|
|
|
} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
|
|
|
|
|
|
- /* accumulate the deltas to propgate */
|
|
|
+ /* calculate the delta to propagate */
|
|
|
delta.cputime.utime = cputime.utime - last_cputime->utime;
|
|
|
delta.cputime.stime = cputime.stime - last_cputime->stime;
|
|
|
delta.cputime.sum_exec_runtime = cputime.sum_exec_runtime -
|
|
@@ -278,7 +300,7 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
|
|
|
struct cgroup_rstat_cpu *rstatc)
|
|
|
{
|
|
|
u64_stats_update_end(&rstatc->bsync);
|
|
|
- cgroup_rstat_cpu_updated(cgrp, smp_processor_id());
|
|
|
+ cgroup_rstat_updated(cgrp, smp_processor_id());
|
|
|
put_cpu_ptr(rstatc);
|
|
|
}
|
|
|
|
|
@@ -323,14 +345,10 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
|
|
|
if (!cgroup_parent(cgrp))
|
|
|
return;
|
|
|
|
|
|
- mutex_lock(&cgroup_rstat_mutex);
|
|
|
-
|
|
|
- cgroup_rstat_flush_locked(cgrp);
|
|
|
-
|
|
|
+ cgroup_rstat_flush_hold(cgrp);
|
|
|
usage = cgrp->bstat.cputime.sum_exec_runtime;
|
|
|
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
|
|
|
-
|
|
|
- mutex_unlock(&cgroup_rstat_mutex);
|
|
|
+ cgroup_rstat_flush_release();
|
|
|
|
|
|
do_div(usage, NSEC_PER_USEC);
|
|
|
do_div(utime, NSEC_PER_USEC);
|