|
@@ -33,12 +33,16 @@
|
|
|
#include <linux/cpu.h>
|
|
|
#include <linux/err.h>
|
|
|
#include <linux/ftrace.h>
|
|
|
+#include <linux/irqdomain.h>
|
|
|
+#include <linux/of.h>
|
|
|
+#include <linux/of_irq.h>
|
|
|
|
|
|
#include <linux/atomic.h>
|
|
|
#include <asm/cpu.h>
|
|
|
#include <asm/processor.h>
|
|
|
#include <asm/idle.h>
|
|
|
#include <asm/r4k-timer.h>
|
|
|
+#include <asm/mips-cpc.h>
|
|
|
#include <asm/mmu_context.h>
|
|
|
#include <asm/time.h>
|
|
|
#include <asm/setup.h>
|
|
@@ -79,6 +83,11 @@ static cpumask_t cpu_core_setup_map;
|
|
|
|
|
|
cpumask_t cpu_coherent_mask;
|
|
|
|
|
|
#ifdef CONFIG_GENERIC_IRQ_IPI
/*
 * Cached IRQ descriptors for the two MIPS SMP IPIs (cross-CPU function
 * call and reschedule). Initialised once from mips_smp_ipi_init() and
 * then used on the IPI send fast path.
 */
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif
|
|
|
+
|
|
|
static inline void set_cpu_sibling_map(int cpu)
|
|
|
{
|
|
|
int i;
|
|
@@ -145,6 +154,133 @@ void register_smp_ops(struct plat_smp_ops *ops)
|
|
|
mp_ops = ops;
|
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_GENERIC_IRQ_IPI
/*
 * Send an IPI of type @action to a single @cpu by delegating to the
 * cpumask-based implementation.
 */
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}
|
|
|
/*
 * Send an IPI of type @action to every CPU in @mask.
 *
 * Only SMP_CALL_FUNCTION and SMP_RESCHEDULE_YOURSELF are valid actions;
 * any other value is a caller bug and triggers BUG().
 */
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	/* Interrupts off for the whole send + power-up sequence. */
	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			core = cpu_data[cpu].core;

			/* CPUs on our own core are already coherent. */
			if (core == current_cpu_data.core)
				continue;

			/*
			 * Spin issuing CPC power-up commands for the target
			 * CPU's core until that CPU shows up in
			 * cpu_coherent_mask.
			 * NOTE(review): presumably this wakes a core that is
			 * powered down / non-coherent and would otherwise
			 * miss the IPI just sent — confirm against the
			 * platform's CPC/CPS documentation.
			 */
			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}
|
|
|
+
|
|
|
/* Reschedule-IPI handler: hand off to the scheduler's IPI entry point. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}
|
|
|
+
|
|
|
/* Call-function-IPI handler: run queued cross-CPU function calls. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}
|
|
|
+
|
|
|
/* irqaction attached to each reschedule-IPI virq (per-CPU, no dev_id). */
static struct irqaction irq_resched = {
	.handler = ipi_resched_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI resched"
};
|
|
|
+
|
|
|
/* irqaction attached to each call-function-IPI virq (per-CPU, no dev_id). */
static struct irqaction irq_call = {
	.handler = ipi_call_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI call"
};
|
|
|
+
|
|
|
/*
 * Wire up a single IPI virq: install the per-CPU flow handler first,
 * then attach @action. IPIs are essential for SMP, so a setup failure
 * is fatal at boot (BUG_ON).
 */
static __init void smp_ipi_init_one(unsigned int virq,
				    struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}
|
|
|
+
|
|
|
/*
 * Early-boot setup for the call-function and reschedule IPIs.
 *
 * Finds an IPI-capable irqdomain — preferring the DT interrupt parent
 * of the root node, then falling back to a non-DT match — reserves one
 * IPI virq (or per-CPU virq range) for each of the two IPI types across
 * all possible CPUs, installs the handlers, and caches the irq_desc
 * pointers used by mips_smp_send_ipi_mask(). Every failure is fatal
 * (BUG_ON): SMP cannot operate without working IPIs.
 */
static int __init mips_smp_ipi_init(void)
{
	unsigned int call_virq, sched_virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
	BUG_ON(!call_virq);

	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
	BUG_ON(!sched_virq);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		/* Per-CPU IPI domains hand out one virq per possible CPU. */
		for_each_cpu(cpu, cpu_possible_mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	/* Cache descriptors for the IPI send fast path. */
	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif
|
|
|
+
|
|
|
/*
|
|
|
* First C code run on the secondary CPUs after being started up by
|
|
|
* the master.
|