@@ -554,6 +554,23 @@ drop_precision:
 	return (__force cputime_t) scaled;
 }
 
+/*
+ * Atomically advance counter to the new value. Interrupts, vcpu
+ * scheduling, and scaling inaccuracies can cause cputime_advance
+ * to be occasionally called with a new value smaller than counter.
+ * Let's enforce atomicity.
+ *
+ * Normally a caller will only go through this loop once, or not
+ * at all in case a previous caller updated counter the same jiffy.
+ */
+static void cputime_advance(cputime_t *counter, cputime_t new)
+{
+	cputime_t old;
+
+	while (new > (old = ACCESS_ONCE(*counter)))
+		cmpxchg_cputime(counter, old, new);
+}
+
 /*
  * Adjust tick based cputime random precision against scheduler
  * runtime accounting.
@@ -599,16 +616,8 @@ static void cputime_adjust(struct task_cputime *curr,
 		utime = rtime - stime;
 	}
 
-	/*
-	 * If the tick based count grows faster than the scheduler one,
-	 * the result of the scaling may go backward.
-	 * Let's enforce monotonicity.
-	 * Atomic exchange protects against concurrent cputime_adjust().
-	 */
-	while (stime > (rtime = ACCESS_ONCE(prev->stime)))
-		cmpxchg(&prev->stime, rtime, stime);
-	while (utime > (rtime = ACCESS_ONCE(prev->utime)))
-		cmpxchg(&prev->utime, rtime, utime);
+	cputime_advance(&prev->stime, stime);
+	cputime_advance(&prev->utime, utime);
 
 out:
 	*ut = prev->utime;
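
The new helper keeps the same lock-free pattern the open-coded loops used: re-read the
counter and only attempt the store while the new value is still ahead of what is
published. For readers without a kernel tree handy, the following is a rough user-space
sketch of that pattern using C11 atomics in place of ACCESS_ONCE()/cmpxchg_cputime();
the names counter_advance and the plain uint64_t counter are made up for the example
and are not part of the patch.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static void counter_advance(_Atomic uint64_t *counter, uint64_t new)
{
	uint64_t old = atomic_load_explicit(counter, memory_order_relaxed);

	/*
	 * Keep retrying while our value is still ahead of what is stored
	 * and the compare-and-swap loses a race; a failed CAS reloads
	 * 'old' with the current contents of *counter, mirroring
	 *   while (new > (old = ACCESS_ONCE(*counter)))
	 *           cmpxchg_cputime(counter, old, new);
	 */
	while (new > old &&
	       !atomic_compare_exchange_weak_explicit(counter, &old, new,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;
}

int main(void)
{
	_Atomic uint64_t counter = 100;

	counter_advance(&counter, 105);	/* advances: 100 -> 105 */
	counter_advance(&counter, 103);	/* smaller value, counter stays at 105 */

	printf("counter = %llu\n",
	       (unsigned long long)atomic_load_explicit(&counter,
							memory_order_relaxed));
	return 0;
}

As with cputime_advance(), a caller holding a stale or smaller value either loses the
race and retries against the newer contents or returns without touching the counter, so
the published value never moves backward even with concurrent callers.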