@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
-	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adr_l	x27, _resume_switched	/* __enable_mmu will branch here */
 	adrp	x25, idmap_pg_dir
 	adrp	x26, swapper_pg_dir
 	b	__cpu_setup
 ENDPROC(cpu_resume)
 
+	.pushsection	".idmap.text", "ax"
+_resume_switched:
+	ldr	x8, =_cpu_resume
+	br	x8
+ENDPROC(_resume_switched)
+	.ltorg
+	.popsection
+
 ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash