@@ -1013,10 +1013,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 }
 
 /*
- * Dump stacks of all tasks running on stalled CPUs. This is a fallback
- * for architectures that do not implement trigger_all_cpu_backtrace().
- * The NMI-triggered stack traces are more accurate because they are
- * printed by the target CPU.
+ * Dump stacks of all tasks running on stalled CPUs.
  */
 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 {
@@ -1094,7 +1091,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
 	if (ndetected == 0)
 		pr_err("INFO: Stall ended before state dump start\n");
-	else if (!trigger_all_cpu_backtrace())
+	else
 		rcu_dump_cpu_stacks(rsp);
 
 	/* Complain about tasks blocking the grace period. */
@@ -1125,8 +1122,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
 	       jiffies - rsp->gp_start,
 	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
-	if (!trigger_all_cpu_backtrace())
-		dump_stack();
+	rcu_dump_cpu_stacks(rsp);
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
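
For context: after this change both stall-warning paths funnel through rcu_dump_cpu_stacks(), which walks RCU's combining tree and dumps the stack of each CPU still blocking the current grace period, instead of falling back to NMI-based trigger_all_cpu_backtrace(). The helper itself is not part of this hunk; the sketch below shows roughly how it looked in kernels of this era (field names and locking details may differ by version):

	/* Sketch, not part of the patch: dump stacks of CPUs holding up the GP. */
	static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
	{
		int cpu;
		unsigned long flags;
		struct rcu_node *rnp;

		/* Visit each leaf rcu_node; its ->qsmask has one bit per CPU
		 * that has not yet reported a quiescent state. */
		rcu_for_each_leaf_node(rsp, rnp) {
			raw_spin_lock_irqsave(&rnp->lock, flags);
			if (rnp->qsmask != 0) {
				for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
					if (rnp->qsmask & (1UL << cpu))
						/* Print the stalled CPU's current task stack. */
						dump_cpu_task(rnp->grplo + cpu);
			}
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}
	}

Note the design trade-off this patch accepts: the traces are printed by the CPU that detected the stall rather than by the stalled CPUs themselves, so they can be less precise than NMI-triggered backtraces, but the path avoids the NMI machinery entirely.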