@@ -23,7 +23,7 @@ struct mm_struct;
 /*
  * Don't change this structure - ASM code relies on it.
  */
-extern struct processor {
+struct processor {
	/* MISC
	 * get data abort address/flags
	 */
@@ -79,9 +79,13 @@ extern struct processor {
	unsigned int suspend_size;
	void (*do_suspend)(void *);
	void (*do_resume)(void *);
-} processor;
+};
 
 #ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 extern void cpu_do_suspend(void *);
 extern void cpu_do_resume(void *);
 #else
-#define cpu_proc_init			processor._proc_init
-#define cpu_proc_fin			processor._proc_fin
-#define cpu_reset			processor.reset
-#define cpu_do_idle			processor._do_idle
-#define cpu_dcache_clean_area		processor.dcache_clean_area
-#define cpu_set_pte_ext			processor.set_pte_ext
-#define cpu_do_switch_mm		processor.switch_mm
 
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend			processor.do_suspend
-#define cpu_do_resume			processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised. We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)			cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)			cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	unsigned int cpu = smp_processor_id();
+	*cpu_vtable[cpu] = *p;
+	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+		     cpu_vtable[0]->dcache_clean_area);
+	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+		     cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f)			processor.f
+#define PROC_TABLE(f)			processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	processor = *p;
+}
+#endif
+
+#define cpu_proc_init			PROC_VTABLE(_proc_init)
+#define cpu_check_bugs			PROC_VTABLE(check_bugs)
+#define cpu_proc_fin			PROC_VTABLE(_proc_fin)
+#define cpu_reset			PROC_VTABLE(reset)
+#define cpu_do_idle			PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area		PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext			PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm		PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend			PROC_VTABLE(do_suspend)
+#define cpu_do_resume			PROC_VTABLE(do_resume)
 #endif
 
 extern void cpu_resume(void);
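
For readers less familiar with the scheme, below is a minimal user-space sketch (not kernel code; all names such as demo_processor, DEMO_VTABLE and demo_init_vtable are hypothetical) of the per-CPU vtable idea the last hunk adds: each CPU gets its own copy of the processor methods, while the entries that may be reached from preemptible context must match CPU 0's copy, which is what the WARN_ON_ONCE() checks in init_proc_vtable() assert for dcache_clean_area and set_pte_ext.

#include <assert.h>
#include <stdio.h>

#define NR_DEMO_CPUS 2

struct demo_processor {
	void (*do_idle)(void);			/* may legitimately differ per CPU */
	void (*dcache_clean_area)(void *, int);	/* must match CPU 0's entry */
};

static struct demo_processor demo_vtables[NR_DEMO_CPUS];
static struct demo_processor *demo_cpu_vtable[NR_DEMO_CPUS] = {
	&demo_vtables[0], &demo_vtables[1],
};

/* PROC_VTABLE-style access: the named CPU's own entry */
#define DEMO_VTABLE(cpu, f)	(demo_cpu_vtable[(cpu)]->f)
/* PROC_TABLE-style access: always CPU 0's entry, usable without knowing the CPU */
#define DEMO_TABLE(f)		(demo_cpu_vtable[0]->f)

static void a15_idle(void) { puts("A15 idle"); }
static void a7_idle(void)  { puts("A7 idle"); }
static void common_clean(void *addr, int size) { (void)addr; (void)size; }

/* Mirrors init_proc_vtable(): copy the methods, check the shared ones agree */
static void demo_init_vtable(int cpu, const struct demo_processor *p)
{
	*demo_cpu_vtable[cpu] = *p;
	assert(demo_cpu_vtable[cpu]->dcache_clean_area ==
	       demo_cpu_vtable[0]->dcache_clean_area);
}

int main(void)
{
	const struct demo_processor a15 = { a15_idle, common_clean };
	const struct demo_processor a7  = { a7_idle,  common_clean };

	demo_init_vtable(0, &a15);		/* boot CPU */
	demo_init_vtable(1, &a7);		/* different core type, same shared op */

	DEMO_VTABLE(0, do_idle)();		/* prints "A15 idle" */
	DEMO_VTABLE(1, do_idle)();		/* prints "A7 idle" */
	DEMO_TABLE(dcache_clean_area)(NULL, 0);	/* CPU-agnostic path */
	return 0;
}

The point of the sketch is the split between the two access macros: DEMO_TABLE()-style (PROC_TABLE()-style) lookups never need to know the current CPU, so they remain safe in preemptible context provided the shared entries are identical, which is exactly the invariant init_proc_vtable() warns about if it is ever violated.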