@@ -196,39 +196,62 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 	return 0;
 }

-static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+/*
+ * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
+ * to avoid race conditions with concurrent updates to cputime.
+ */
+static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
 {
-	if (b->utime > a->utime)
-		a->utime = b->utime;
+	u64 curr_cputime;
+retry:
+	curr_cputime = atomic64_read(cputime);
+	if (sum_cputime > curr_cputime) {
+		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
+			goto retry;
+	}
+}

-	if (b->stime > a->stime)
-		a->stime = b->stime;
+static void update_gt_cputime(struct thread_group_cputimer *cputimer, struct task_cputime *sum)
+{
+	__update_gt_cputime(&cputimer->utime, sum->utime);
+	__update_gt_cputime(&cputimer->stime, sum->stime);
+	__update_gt_cputime(&cputimer->sum_exec_runtime, sum->sum_exec_runtime);
+}

-	if (b->sum_exec_runtime > a->sum_exec_runtime)
-		a->sum_exec_runtime = b->sum_exec_runtime;
+/* Sample thread_group_cputimer values in "cputimer", store results in "times". */
+static inline void sample_group_cputimer(struct task_cputime *times,
+					 struct thread_group_cputimer *cputimer)
+{
+	times->utime = atomic64_read(&cputimer->utime);
+	times->stime = atomic64_read(&cputimer->stime);
+	times->sum_exec_runtime = atomic64_read(&cputimer->sum_exec_runtime);
 }

 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 	struct task_cputime sum;
-	unsigned long flags;

-	if (!cputimer->running) {
+	/* Check if cputimer isn't running. This is accessed without locking. */
+	if (!READ_ONCE(cputimer->running)) {
 		/*
 		 * The POSIX timer interface allows for absolute time expiry
 		 * values through the TIMER_ABSTIME flag, therefore we have
-		 * to synchronize the timer to the clock every time we start
-		 * it.
+		 * to synchronize the timer to the clock every time we start it.
 		 */
 		thread_group_cputime(tsk, &sum);
-		raw_spin_lock_irqsave(&cputimer->lock, flags);
-		cputimer->running = 1;
-		update_gt_cputime(&cputimer->cputime, &sum);
-	} else
-		raw_spin_lock_irqsave(&cputimer->lock, flags);
-	*times = cputimer->cputime;
-	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+		update_gt_cputime(cputimer, &sum);
+
+		/*
+		 * We're setting cputimer->running without a lock. Ensure
+		 * this only gets written to in one operation. We set
+		 * running after update_gt_cputime() as a small optimization,
+		 * but barriers are not required because update_gt_cputime()
+		 * can handle concurrent updates.
+		 */
+		WRITE_ONCE(cputimer->running, 1);
+	}
+	sample_group_cputimer(times, cputimer);
 }

 /*
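Aside: __update_gt_cputime() above is the classic "advance to the maximum" compare-and-swap loop. A minimal userspace sketch of the same pattern, using C11 <stdatomic.h> in place of the kernel's atomic64_* helpers (the function and variable names here are illustrative, not part of the patch):

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative sketch only: mirrors the shape of __update_gt_cputime(). */
static void update_gt(_Atomic uint64_t *val, uint64_t sum)
{
	uint64_t cur = atomic_load(val);

	while (sum > cur) {
		/*
		 * Try to install 'sum'. On failure, 'cur' is refreshed with the
		 * value another thread stored, and we retry only while our sum
		 * is still larger.
		 */
		if (atomic_compare_exchange_weak(val, &cur, sum))
			break;
	}
}

Because the stored value only ever grows, concurrent callers can race freely: a loser of the compare-and-swap rereads the newer value and retries only if its own sum is still bigger. That is also why thread_group_cputimer() can set cputimer->running after update_gt_cputime() without additional barriers, as the comment in the hunk above notes.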
@@ -582,7 +605,8 @@ bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
 	if (!task_cputime_zero(&tsk->cputime_expires))
 		return false;

-	if (tsk->signal->cputimer.running)
+	/* Check if cputimer is running. This is accessed without locking. */
+	if (READ_ONCE(tsk->signal->cputimer.running))
 		return false;

 	return true;
@@ -882,14 +906,12 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }

-static void stop_process_timers(struct signal_struct *sig)
+static inline void stop_process_timers(struct signal_struct *sig)
 {
 	struct thread_group_cputimer *cputimer = &sig->cputimer;
-	unsigned long flags;

-	raw_spin_lock_irqsave(&cputimer->lock, flags);
-	cputimer->running = 0;
-	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+	/* Turn off cputimer->running. This is done without locking. */
+	WRITE_ONCE(cputimer->running, 0);
 }

 static u32 onecputick;
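Aside: cputimer->running is now a single word that is read and written without the lock; READ_ONCE()/WRITE_ONCE() only ensure the compiler emits one whole load or store rather than tearing or refetching it. A loose userspace analogue, assuming C11 relaxed atomics stand in for those annotations (the names below are made up for illustration):

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative sketch only: a lockless on/off flag accessed as one word. */
static _Atomic int timer_running;

static inline void set_timer_running(bool on)
{
	atomic_store_explicit(&timer_running, on, memory_order_relaxed);
}

static inline bool timer_is_running(void)
{
	return atomic_load_explicit(&timer_running, memory_order_relaxed);
}

The flag itself imposes no ordering; per the comment in thread_group_cputimer(), none is needed because update_gt_cputime() tolerates concurrent updates.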
@@ -1111,12 +1133,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	}

 	sig = tsk->signal;
-	if (sig->cputimer.running) {
+	/* Check if cputimer is running. This is accessed without locking. */
+	if (READ_ONCE(sig->cputimer.running)) {
 		struct task_cputime group_sample;

-		raw_spin_lock(&sig->cputimer.lock);
-		group_sample = sig->cputimer.cputime;
-		raw_spin_unlock(&sig->cputimer.lock);
+		sample_group_cputimer(&group_sample, &sig->cputimer);

 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
@@ -1157,7 +1178,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * If there are any active process wide timers (POSIX 1.b, itimers,
 	 * RLIMIT_CPU) cputimer must be running.
 	 */
-	if (tsk->signal->cputimer.running)
+	if (READ_ONCE(tsk->signal->cputimer.running))
 		check_process_timers(tsk, &firing);

 	/*
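Aside: putting the pieces together, the fast path after this conversion amounts to "check the flag, then sample the atomic64 fields one at a time". A minimal sketch modelled loosely on fastpath_timer_check(); the struct layout, field names and the single expiry comparison below are invented for illustration and much simplified:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch only: not the kernel's thread_group_cputimer. */
struct group_cputimer {
	_Atomic int      running;
	_Atomic uint64_t utime;
	_Atomic uint64_t stime;
	_Atomic uint64_t sum_exec_runtime;
};

static bool group_timer_expired(struct group_cputimer *ct, uint64_t expires)
{
	/* Nothing to do unless a process-wide timer is armed. */
	if (!atomic_load_explicit(&ct->running, memory_order_relaxed))
		return false;

	/* Lockless sample, one field at a time, as sample_group_cputimer() does. */
	return atomic_load_explicit(&ct->sum_exec_runtime, memory_order_relaxed) >= expires;
}

Unlike the old spinlocked copy of cputimer->cputime, such a sample is not a consistent snapshot of all three counters at once; each field is read independently.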