@@ -34,8 +34,32 @@
 
 extern void exynos4_secondary_startup(void);
 
+/*
+ * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
+ * during hot-(un)plugging CPUx.
+ *
+ * The feature can be cleared safely during first boot of secondary CPU.
+ *
+ * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
+ * down a CPU so the CPU idle clock down feature could properly detect global
+ * idle state when CPUx is off.
+ */
+static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
+{
+	if (soc_is_exynos4()) {
+		unsigned int tmp;
+
+		tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
+		if (enable)
+			tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
+		else
+			tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
+		pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
+	}
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
-static inline void cpu_leave_lowpower(void)
+static inline void cpu_leave_lowpower(u32 core_id)
 {
 	unsigned int v;
 
@@ -49,6 +73,8 @@ static inline void cpu_leave_lowpower(void)
 	  : "=&r" (v)
 	  : "Ir" (CR_C), "Ir" (0x40)
 	  : "cc");
+
+	exynos_set_delayed_reset_assertion(core_id, false);
 }
 
 static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
@@ -61,6 +87,14 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 		/* Turn the CPU off on next WFI instruction. */
 		exynos_cpu_power_down(core_id);
 
+		/*
+		 * Exynos4 SoCs require setting
+		 * USE_DELAYED_RESET_ASSERTION so the CPU idle
+		 * clock down feature could properly detect
+		 * global idle state when CPUx is off.
+		 */
+		exynos_set_delayed_reset_assertion(core_id, true);
+
 		wfi();
 
 		if (pen_release == core_id) {
@@ -286,6 +320,9 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 		udelay(10);
 	}
 
+	/* No harm if this is called during first boot of secondary CPU */
+	exynos_set_delayed_reset_assertion(core_id, false);
+
 	/*
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
@@ -376,6 +413,8 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
 static void exynos_cpu_die(unsigned int cpu)
 {
 	int spurious = 0;
+	u32 mpidr = cpu_logical_map(cpu);
+	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 
 	v7_exit_coherency_flush(louis);
 
@@ -385,7 +424,7 @@ static void exynos_cpu_die(unsigned int cpu)
 	 * bring this CPU back into the world of cache
 	 * coherency, and then restore interrupts
 	 */
-	cpu_leave_lowpower();
+	cpu_leave_lowpower(core_id);
 
 	if (spurious)
 		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
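
For reference, below is a minimal user-space sketch of the two pieces the patch relies on: deriving the core id from the MPIDR (as MPIDR_AFFINITY_LEVEL(mpidr, 0) does, taking affinity level 0 from bits [7:0]) and the read-modify-write toggle performed by exynos_set_delayed_reset_assertion(). The PMU register is modeled by a plain array and the bit position is hypothetical, standing in for pmu_raw_readl()/pmu_raw_writel() on EXYNOS_ARM_CORE_OPTION(core_id); this is an illustration, not the kernel code.

/* Build: gcc -std=c99 -Wall sketch.c -o sketch && ./sketch */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Affinity-level extraction: each level occupies 8 bits of the MPIDR,
 * level 0 (the per-cluster core number) being bits [7:0]. */
#define MPIDR_LEVEL_BITS	8
#define MPIDR_LEVEL_MASK	((1u << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

/* Stand-in for the per-core ARM_CORE_OPTION register; the bit value is
 * illustrative only, not the hardware definition. */
#define USE_DELAYED_RESET_ASSERTION	(1u << 12)	/* hypothetical bit */
static uint32_t core_option[4];				/* modeled registers */

/* Same read-modify-write shape as exynos_set_delayed_reset_assertion(). */
static void set_delayed_reset_assertion(uint32_t core_id, bool enable)
{
	uint32_t tmp = core_option[core_id];

	if (enable)
		tmp |= USE_DELAYED_RESET_ASSERTION;
	else
		tmp &= ~USE_DELAYED_RESET_ASSERTION;
	core_option[core_id] = tmp;
}

int main(void)
{
	uint32_t mpidr = 0x80000003;			/* cluster 0, core 3 */
	uint32_t core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	set_delayed_reset_assertion(core_id, true);	/* before powering the core down */
	printf("core %u option: 0x%08x\n", core_id, core_option[core_id]);
	set_delayed_reset_assertion(core_id, false);	/* once the core is brought back up */
	printf("core %u option: 0x%08x\n", core_id, core_option[core_id]);
	return 0;
}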