@@ -17,20 +17,21 @@
 #include <linux/kprobes.h>
 #include <linux/nmi.h>
 
-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
 /* For reliability, we're prepared to waste bits here. */
 static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 
-/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+/* "in progress" flag of arch_trigger_cpumask_backtrace */
 static unsigned long backtrace_flag;
 
 /*
- * When raise() is called it will be is passed a pointer to the
+ * When raise() is called it will be passed a pointer to the
  * backtrace_mask. Architectures that call nmi_cpu_backtrace()
  * directly from their raise() functions may rely on the mask
  * they are passed being updated as a side effect of this call.
  */
-void nmi_trigger_all_cpu_backtrace(bool include_self,
+void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+				   bool exclude_self,
 				   void (*raise)(cpumask_t *mask))
 {
 	int i, this_cpu = get_cpu();
@@ -44,13 +45,13 @@ void nmi_trigger_all_cpu_backtrace(bool include_self,
 		return;
 	}
 
-	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
-	if (!include_self)
+	cpumask_copy(to_cpumask(backtrace_mask), mask);
+	if (exclude_self)
 		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
 
 	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
-		pr_info("Sending NMI to %s CPUs:\n",
-			(include_self ? "all" : "other"));
+		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
+			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
 		raise(to_cpumask(backtrace_mask));
 	}
 
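
For reference, a minimal sketch of how an architecture might hook up the
renamed helper. This is illustrative only: nmi_raise_cpu_backtrace() and
arch_send_backtrace_ipi() are hypothetical names, and the actual NMI/IPI
delivery mechanism is architecture-specific.

	#include <linux/cpumask.h>
	#include <linux/nmi.h>

	/* Hypothetical arch hook: deliver the backtrace NMI/IPI to every
	 * CPU set in the mask; the real mechanism is per-architecture. */
	static void nmi_raise_cpu_backtrace(cpumask_t *mask)
	{
		arch_send_backtrace_ipi(mask);	/* assumed, arch-specific */
	}

	void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
					    bool exclude_self)
	{
		nmi_trigger_cpumask_backtrace(mask, exclude_self,
					      nmi_raise_cpu_backtrace);
	}

With this shape, a caller can request backtraces for an arbitrary set of
CPUs (e.g. passing cpu_online_mask recovers the old all-CPUs behaviour),
and exclude_self == true drops the calling CPU from the mask, matching the
old include_self == false case.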