@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 
 /*
  * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
@@ -455,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 
 /*
  * Insert the timer on the appropriate list before any timers that
- * expire later. This must be called with the tasklist_lock held
- * for reading, interrupts disabled and p->sighand->siglock taken.
+ * expire later. This must be called with the sighand lock held.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -547,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 
 /*
  * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
@@ -610,9 +611,11 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again. (This happens when the timer is in the middle of firing.)
  */
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 			       struct itimerspec *new, struct itimerspec *old)
 {
+	unsigned long flags;
+	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long old_expires, new_expires, old_incr, val;
 	int ret;
@@ -621,14 +624,16 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
 	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 
-	read_lock(&tasklist_lock);
 	/*
-	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->sighand. If p has just been reaped, we can no
+	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
+	 * and p->signal->cpu_timers read/write in arm_timer()
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	/*
+	 * If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->sighand == NULL)) {
-		read_unlock(&tasklist_lock);
+	if (unlikely(sighand == NULL)) {
 		return -ESRCH;
 	}
 
@@ -639,7 +644,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
-	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
 	if (unlikely(timer->it.cpu.firing)) {
 		timer->it.cpu.firing = -1;
@@ -696,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
-		spin_unlock(&p->sighand->siglock);
-		read_unlock(&tasklist_lock);
+		unlock_task_sighand(p, &flags);
 		goto out;
 	}
 
-	if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 		new_expires += val;
 	}
 
@@ -715,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		arm_timer(timer);
 	}
 
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
-
+	unlock_task_sighand(p, &flags);
 	/*
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
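
The posix_cpu_timer_set() hunks above converge on one idiom: take the sighand
lock, treat a NULL return as "task already reaped", and release it with the
saved irq flags. In isolation the idiom looks like the sketch below. This is
illustration only, not part of the patch, and the helper name is hypothetical:

	/*
	 * Minimal sketch of the locking idiom adopted by this patch.
	 * cpu_timer_update_locked() is a hypothetical helper name.
	 */
	static int cpu_timer_update_locked(struct k_itimer *timer)
	{
		struct task_struct *p = timer->it.cpu.task;
		struct sighand_struct *sighand;
		unsigned long flags;

		/* NULL means p was reaped and its sighand released. */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL))
			return -ESRCH;

		/*
		 * p->sighand->siglock is held and irqs are disabled here,
		 * so the thread group and the cpu timer lists are stable.
		 */

		unlock_task_sighand(p, &flags);
		return 0;
	}

Compared with the old sequence (read_lock(&tasklist_lock), check p->sighand,
then spin_lock(&p->sighand->siglock)), this takes a single per-process lock
rather than a global rwlock plus a spinlock.
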
@@ -779,8 +780,16 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		struct sighand_struct *sighand;
+		unsigned long flags;
+
+		/*
+		 * Protect against sighand release/switch in exit/exec and
+		 * also make timer sampling safe if it ends up calling
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
@@ -789,11 +798,10 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 			timer->it.cpu.expires = 0;
 			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
 					   &itp->it_value);
-			read_unlock(&tasklist_lock);
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
+			unlock_task_sighand(p, &flags);
 		}
-		read_unlock(&tasklist_lock);
 	}
 
 	if (now < timer->it.cpu.expires) {
@@ -1007,6 +1015,8 @@ static void check_process_timers(struct task_struct *tsk,
  */
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
+	struct sighand_struct *sighand;
+	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long now;
 
@@ -1021,27 +1031,31 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 		if (unlikely(p->exit_state))
 			goto out;
 
-		read_lock(&tasklist_lock); /* arm_timer needs it. */
-		spin_lock(&p->sighand->siglock);
+		/* Protect timer list r/w in arm_timer() */
+		sighand = lock_task_sighand(p, &flags);
+		if (!sighand)
+			goto out;
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		/*
+		 * Protect arm_timer() and timer sampling in case of call to
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 */
 			timer->it.cpu.expires = 0;
-			read_unlock(&tasklist_lock);
 			goto out;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			read_unlock(&tasklist_lock);
+			unlock_task_sighand(p, &flags);
 			/* Optimizations: if the process is dying, no need to rearm */
 			goto out;
 		}
-		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		/* Leave the tasklist_lock locked for the call below. */
+		/* Leave the sighand locked for the call below. */
 	}
 
 	/*
@@ -1049,12 +1063,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	 */
 	BUG_ON(!irqs_disabled());
 	arm_timer(timer);
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
+	unlock_task_sighand(p, &flags);
 
 	/* Kick full dynticks CPUs in case they need to tick on the new timer */
 	posix_cpu_timer_kick_nohz();
-
 out:
 	timer->it_overrun_last = timer->it_overrun;
 	timer->it_overrun = -1;
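
The NULL-on-reaped guarantee used throughout relies on __lock_task_sighand().
The sketch below is simplified from the kernel/signal.c implementation of this
era and may differ in detail; the point is the RCU-protected retry loop that
either returns with siglock held on the task's current sighand, or returns
NULL once the task has been reaped:

	/* Simplified sketch of __lock_task_sighand() (kernel/signal.c). */
	struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
						   unsigned long *flags)
	{
		struct sighand_struct *sighand;

		for (;;) {
			local_irq_save(*flags);
			rcu_read_lock();
			sighand = rcu_dereference(tsk->sighand);
			if (unlikely(sighand == NULL)) {
				/* Reaped: report NULL to the caller. */
				rcu_read_unlock();
				local_irq_restore(*flags);
				break;
			}
			spin_lock(&sighand->siglock);
			if (likely(sighand == tsk->sighand)) {
				/* Still current: return with siglock held. */
				rcu_read_unlock();
				break;
			}
			/* Raced with a sighand switch in exec; retry. */
			spin_unlock(&sighand->siglock);
			rcu_read_unlock();
			local_irq_restore(*flags);
		}
		return sighand;
	}

Because the final recheck happens under siglock, a caller that gets a non-NULL
sighand knows the task cannot be reaped, and its thread group cannot change,
until it calls unlock_task_sighand().
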