@@ -116,18 +116,18 @@ static inline void task_group_account_field(struct task_struct *p, int index,
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
  */
-void account_user_time(struct task_struct *p, cputime_t cputime)
+void account_user_time(struct task_struct *p, u64 cputime)
 {
 	int index;
 
 	/* Add user time to process. */
-	p->utime += cputime_to_nsecs(cputime);
-	account_group_user_time(p, cputime_to_nsecs(cputime));
+	p->utime += cputime;
+	account_group_user_time(p, cputime);
 
 	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 
 	/* Add user time to cpustat. */
-	task_group_account_field(p, index, cputime_to_nsecs(cputime));
+	task_group_account_field(p, index, cputime);
 
 	/* Account for user time used */
 	acct_account_cputime(p);
@@ -363,8 +363,9 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 					 struct rq *rq, int ticks)
 {
-	u64 cputime = (__force u64) cputime_one_jiffy * ticks;
+	u64 old_cputime = (__force u64) cputime_one_jiffy * ticks;
 	cputime_t other;
+	u64 cputime;
 
 	/*
 	 * When returning from idle, many ticks can get accounted at
@@ -374,9 +375,11 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 	 * other time can exceed ticks occasionally.
 	 */
 	other = account_other_time(ULONG_MAX);
-	if (other >= cputime)
+	if (other >= old_cputime)
 		return;
-	cputime -= other;
+
+	old_cputime -= other;
+	cputime = cputime_to_nsecs(old_cputime);
 
 	if (this_cpu_ksoftirqd() == p) {
 		/*
@@ -384,15 +387,16 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 		 * So, we have to handle it separately here.
 		 * Also, p->stime needs to be updated for ksoftirqd.
 		 */
-		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
+		account_system_index_time(p, old_cputime, CPUTIME_SOFTIRQ);
 	} else if (user_tick) {
 		account_user_time(p, cputime);
 	} else if (p == rq->idle) {
-		account_idle_time(cputime);
+		account_idle_time(old_cputime);
 	} else if (p->flags & PF_VCPU) { /* System time or guest time */
-		account_guest_time(p, cputime);
+
+		account_guest_time(p, old_cputime);
 	} else {
-		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
+		account_system_index_time(p, old_cputime, CPUTIME_SYSTEM);
 	}
 }
 
@@ -473,7 +477,8 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-	cputime_t cputime, steal;
+	cputime_t old_cputime, steal;
+	u64 cputime;
 	struct rq *rq = this_rq();
 
 	if (vtime_accounting_cpu_enabled())
@@ -484,20 +489,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
 		return;
 	}
 
-	cputime = cputime_one_jiffy;
+	old_cputime = cputime_one_jiffy;
 	steal = steal_account_process_time(ULONG_MAX);
 
-	if (steal >= cputime)
+	if (steal >= old_cputime)
 		return;
 
-	cputime -= steal;
+	old_cputime -= steal;
+	cputime = cputime_to_nsecs(old_cputime);
 
 	if (user_tick)
 		account_user_time(p, cputime);
 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-		account_system_time(p, HARDIRQ_OFFSET, cputime);
+		account_system_time(p, HARDIRQ_OFFSET, old_cputime);
 	else
-		account_idle_time(cputime);
+		account_idle_time(old_cputime);
 }
 
 /*
@@ -736,7 +742,7 @@ void vtime_account_user(struct task_struct *tsk)
 	tsk->vtime_snap_whence = VTIME_SYS;
 	if (vtime_delta(tsk)) {
 		delta_cpu = get_vtime_delta(tsk);
-		account_user_time(tsk, delta_cpu);
+		account_user_time(tsk, cputime_to_nsecs(delta_cpu));
 	}
 	write_seqcount_end(&tsk->vtime_seqcount);
 }