@@ -51,6 +51,9 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -313,8 +316,6 @@ void __init smp_prepare_boot_cpu(void)
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
 
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-
 /*
  * Enumerate the possible CPU set from the device tree and build the
  * cpu logical map array containing MPIDR values related to logical
@@ -469,32 +470,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-	smp_cross_call = fn;
+	__smp_cross_call = fn;
 }
 
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-#ifdef CONFIG_IRQ_WORK
-void arch_irq_work_raise(void)
-{
-	if (smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
-}
-#endif
-
-static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x - IPI_RESCHEDULE] = s
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+#define S(x,s) [x] = s
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
@@ -503,12 +487,18 @@ static const char *ipi_types[NR_IPI] = {
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
 };
 
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__smp_cross_call(target, ipinr);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
 
 	for (i = 0; i < NR_IPI; i++) {
-		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
+		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ",
@@ -528,6 +518,24 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	if (__smp_cross_call)
+		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
 static DEFINE_RAW_SPINLOCK(stop_lock);
 
 /*
@@ -559,8 +567,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	unsigned int cpu = smp_processor_id();
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
-		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
+	if ((unsigned)ipinr < NR_IPI) {
+		trace_ipi_entry(ipi_types[ipinr]);
+		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+	}
 
 	switch (ipinr) {
 	case IPI_RESCHEDULE:
@@ -605,6 +615,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
 		break;
 	}
+
+	if ((unsigned)ipinr < NR_IPI)
+		trace_ipi_exit(ipi_types[ipinr]);
 	set_irq_regs(old_regs);
 }
 