@@ -36,7 +36,7 @@ struct cpu_stop_done {
 struct cpu_stopper {
 	struct task_struct	*thread;
 
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
 
@@ -78,13 +78,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 	unsigned long flags;
 	bool enabled;
 
-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);
 	enabled = stopper->enabled;
 	if (enabled)
 		__cpu_stop_queue_work(stopper, work);
 	else if (work->done)
 		cpu_stop_signal_done(work->done);
-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
 	return enabled;
 }
@@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
 	int err;
 retry:
-	spin_lock_irq(&stopper1->lock);
-	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock_irq(&stopper1->lock);
+	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
 	err = -ENOENT;
 	if (!stopper1->enabled || !stopper2->enabled)
@@ -255,8 +255,8 @@ retry:
 	__cpu_stop_queue_work(stopper1, work1);
 	__cpu_stop_queue_work(stopper2, work2);
 unlock:
-	spin_unlock(&stopper2->lock);
-	spin_unlock_irq(&stopper1->lock);
+	raw_spin_unlock(&stopper2->lock);
+	raw_spin_unlock_irq(&stopper1->lock);
 
 	if (unlikely(err == -EDEADLK)) {
 		while (stop_cpus_in_progress)
@@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned int cpu)
 	unsigned long flags;
 	int run;
 
-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);
 	run = !list_empty(&stopper->works);
-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 	return run;
 }
 
@@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned int cpu)
 
 repeat:
 	work = NULL;
-	spin_lock_irq(&stopper->lock);
+	raw_spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
 		work = list_first_entry(&stopper->works,
 					struct cpu_stop_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock_irq(&stopper->lock);
+	raw_spin_unlock_irq(&stopper->lock);
 
 	if (work) {
 		cpu_stop_fn_t fn = work->fn;
@@ -541,7 +541,7 @@ static int __init cpu_stop_init(void)
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-		spin_lock_init(&stopper->lock);
+		raw_spin_lock_init(&stopper->lock);
 		INIT_LIST_HEAD(&stopper->works);
 	}
 