@@ -1153,6 +1153,7 @@ static void update_curr_dl(struct rq *rq)
 	struct sched_dl_entity *dl_se = &curr->dl;
 	u64 delta_exec, scaled_delta_exec;
 	int cpu = cpu_of(rq);
+	u64 now;
 
 	if (!dl_task(curr) || !on_dl_rq(dl_se))
 		return;
@@ -1165,7 +1166,8 @@ static void update_curr_dl(struct rq *rq)
 	 * natural solution, but the full ramifications of this
 	 * approach need further study.
 	 */
-	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
+	now = rq_clock_task(rq);
+	delta_exec = now - curr->se.exec_start;
 	if (unlikely((s64)delta_exec <= 0)) {
 		if (unlikely(dl_se->dl_yielded))
 			goto throttle;
@@ -1178,7 +1180,7 @@ static void update_curr_dl(struct rq *rq)
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);
 
-	curr->se.exec_start = rq_clock_task(rq);
+	curr->se.exec_start = now;
 	cgroup_account_cputime(curr, delta_exec);
 
 	sched_rt_avg_update(rq, delta_exec);