@@ -139,6 +139,8 @@ void update_rq_clock(struct rq *rq)
                 return;
 
         delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+        if (delta < 0)
+                return;
         rq->clock += delta;
         update_rq_clock_task(rq, delta);
 }
@@ -2431,7 +2433,12 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 {
         u64 ns = 0;
 
-        if (task_current(rq, p)) {
+        /*
+         * Must be ->curr _and_ ->on_rq. If dequeued, we would
+         * project cycles that may never be accounted to this
+         * thread, breaking clock_gettime().
+         */
+        if (task_current(rq, p) && p->on_rq) {
                 update_rq_clock(rq);
                 ns = rq_clock_task(rq) - p->se.exec_start;
                 if ((s64)ns < 0)
@@ -2474,8 +2481,10 @@ unsigned long long task_sched_runtime(struct task_struct *p)
          * If we race with it leaving cpu, we'll take a lock. So we're correct.
          * If we race with it entering cpu, unaccounted time is 0. This is
          * indistinguishable from the read occurring a few cycles earlier.
+         * If we see ->on_cpu without ->on_rq, the task is leaving, and has
+         * been accounted, so we're correct here as well.
          */
-        if (!p->on_cpu)
+        if (!p->on_cpu || !p->on_rq)
                 return p->se.sum_exec_runtime;
 #endif
 
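
For context, the race the ->on_rq check guards against is observable from
userspace as a non-monotonic per-thread CPU clock. Below is a minimal,
illustrative reproducer sketch (not part of the patch; the file name,
thread function, and spin/sleep cadence are ours): it obtains a sibling
thread's CPU clock via pthread_getcpuclockid() and verifies that
successive clock_gettime() reads never decrease. On kernels without the
check, do_task_delta_exec() can project a delta for a task that is still
->curr but already dequeued, so a later read may return less than an
earlier one.

/*
 * repro.c -- illustrative sketch, NOT part of the patch.
 * Build: gcc -O2 -pthread repro.c
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static void *target(void *arg)
{
        struct timespec nap = { 0, 100000 };    /* 100us */
        volatile unsigned long spins;

        (void)arg;
        for (;;) {
                /* accrue CPU time, then block so we dequeue/requeue often */
                for (spins = 0; spins < 100000; spins++)
                        ;
                nanosleep(&nap, NULL);
        }
        return NULL;
}

int main(void)
{
        pthread_t tid;
        clockid_t clk;
        struct timespec ts;
        long long prev = 0, cur;
        long i;

        if (pthread_create(&tid, NULL, target, NULL) ||
            pthread_getcpuclockid(tid, &clk))
                return 1;

        for (i = 0; i < 10000000; i++) {
                if (clock_gettime(clk, &ts))
                        return 1;
                cur = ts.tv_sec * 1000000000LL + ts.tv_nsec;
                if (cur < prev) {
                        printf("clock went backwards: %lld < %lld\n",
                               cur, prev);
                        return 1;
                }
                prev = cur;
        }
        puts("no backwards jump observed");
        return 0;
}

Note that the reader side in task_sched_runtime() still checks ->on_cpu
and ->on_rq locklessly; the comment block in the patch spells out why
every interleaving nevertheless returns an already-accounted, and hence
monotonic, sum_exec_runtime.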