@@ -683,10 +683,10 @@ static u64 vtime_delta(struct task_struct *tsk)
 {
 	unsigned long now = READ_ONCE(jiffies);
 
-	if (time_before(now, (unsigned long)tsk->vtime_snap))
+	if (time_before(now, (unsigned long)tsk->vtime_starttime))
 		return 0;
 
-	return jiffies_to_nsecs(now - tsk->vtime_snap);
+	return jiffies_to_nsecs(now - tsk->vtime_starttime);
 }
 
 static u64 get_vtime_delta(struct task_struct *tsk)
@@ -701,10 +701,10 @@ static u64 get_vtime_delta(struct task_struct *tsk)
 	 * elapsed time. Limit account_other_time to prevent rounding
 	 * errors from causing elapsed vtime to go negative.
 	 */
-	delta = jiffies_to_nsecs(now - tsk->vtime_snap);
+	delta = jiffies_to_nsecs(now - tsk->vtime_starttime);
 	other = account_other_time(delta);
-	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
-	tsk->vtime_snap = now;
+	WARN_ON_ONCE(tsk->vtime_state == VTIME_INACTIVE);
+	tsk->vtime_starttime = now;
 
 	return delta - other;
 }
@@ -746,7 +746,7 @@ void vtime_guest_enter(struct task_struct *tsk)
 {
 	/*
 	 * The flags must be updated under the lock with
-	 * the vtime_snap flush and update.
+	 * the vtime_starttime flush and update.
 	 * That enforces a right ordering and update sequence
 	 * synchronization against the reader (task_gtime())
 	 * that can thus safely catch up with a tickless delta.
@@ -776,12 +776,12 @@ void vtime_account_idle(struct task_struct *tsk)
 void arch_vtime_task_switch(struct task_struct *prev)
 {
 	write_seqcount_begin(&prev->vtime_seqcount);
-	prev->vtime_snap_whence = VTIME_INACTIVE;
+	prev->vtime_state = VTIME_INACTIVE;
 	write_seqcount_end(&prev->vtime_seqcount);
 
 	write_seqcount_begin(&current->vtime_seqcount);
-	current->vtime_snap_whence = VTIME_SYS;
-	current->vtime_snap = jiffies;
+	current->vtime_state = VTIME_SYS;
+	current->vtime_starttime = jiffies;
 	write_seqcount_end(&current->vtime_seqcount);
 }
 
@@ -791,8 +791,8 @@ void vtime_init_idle(struct task_struct *t, int cpu)
 
 	local_irq_save(flags);
 	write_seqcount_begin(&t->vtime_seqcount);
-	t->vtime_snap_whence = VTIME_SYS;
-	t->vtime_snap = jiffies;
+	t->vtime_state = VTIME_SYS;
+	t->vtime_starttime = jiffies;
 	write_seqcount_end(&t->vtime_seqcount);
 	local_irq_restore(flags);
 }
@@ -809,7 +809,7 @@ u64 task_gtime(struct task_struct *t)
 		seq = read_seqcount_begin(&t->vtime_seqcount);
 
 		gtime = t->gtime;
-		if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
+		if (t->vtime_state == VTIME_SYS && t->flags & PF_VCPU)
 			gtime += vtime_delta(t);
 
 	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
@@ -840,7 +840,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 		*stime = t->stime;
 
 		/* Task is sleeping, nothing to add */
-		if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t))
+		if (t->vtime_state == VTIME_INACTIVE || is_idle_task(t))
 			continue;
 
 		delta = vtime_delta(t);
@@ -849,9 +849,9 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 		 * Task runs either in user or kernel space, add pending nohz time to
 		 * the right place.
 		 */
-		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU)
+		if (t->vtime_state == VTIME_USER || t->flags & PF_VCPU)
 			*utime += delta;
-		else if (t->vtime_snap_whence == VTIME_SYS)
+		else if (t->vtime_state == VTIME_SYS)
 			*stime += delta;
 	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
 }