@@ -73,8 +73,8 @@ ENTRY(__cpu_suspend_enter)
 	str	x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
 
 	/* find the mpidr_hash */
-	ldr	x1, =sleep_save_sp
-	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
+	ldr	x1, =sleep_save_stash
+	ldr	x1, [x1]
 	mrs	x7, mpidr_el1
 	ldr	x9, =mpidr_hash
 	ldr	x10, [x9, #MPIDR_HASH_MASK]
@@ -87,44 +87,27 @@ ENTRY(__cpu_suspend_enter)
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
 
+	str	x0, [x1]
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	stp	x29, lr, [sp, #-16]!
-	bl	__cpu_suspend_save
+	bl	cpu_do_suspend
 	ldp	x29, lr, [sp], #16
 	mov	x0, #1
 	ret
 ENDPROC(__cpu_suspend_enter)
 	.ltorg
 
-/*
- * x0 must contain the sctlr value retrieved from restored context
- */
-	.pushsection	".idmap.text", "ax"
-ENTRY(cpu_resume_mmu)
-	ldr	x3, =cpu_resume_after_mmu
-	msr	sctlr_el1, x0		// restore sctlr_el1
-	isb
-	/*
-	 * Invalidate the local I-cache so that any instructions fetched
-	 * speculatively from the PoC are discarded, since they may have
-	 * been dynamically patched at the PoU.
-	 */
-	ic	iallu
-	dsb	nsh
-	isb
-	br	x3			// global jump to virtual address
-ENDPROC(cpu_resume_mmu)
-	.popsection
-cpu_resume_after_mmu:
-#ifdef CONFIG_KASAN
-	mov	x0, sp
-	bl	kasan_unpoison_remaining_stack
-#endif
-	mov	x0, #0			// return zero on success
-	ret
-ENDPROC(cpu_resume_after_mmu)
-
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
+	/* enable the MMU early - so we can access sleep_save_stash by va */
+	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
+	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adrp	x25, idmap_pg_dir
+	adrp	x26, swapper_pg_dir
+	b	__cpu_setup
+ENDPROC(cpu_resume)
+
+ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
 	add	x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -134,29 +117,32 @@ ENTRY(cpu_resume)
 	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
 	/* x7 contains hash index, let's use it to grab context pointer */
-	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
+	ldr_l	x0, sleep_save_stash
 	ldr	x0, [x0, x7, lsl #3]
 	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
 	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
-	/* load physical address of identity map page table in x1 */
-	adrp	x1, idmap_pg_dir
 	mov	sp, x2
 	/* save thread_info */
 	and	x2, x2, #~(THREAD_SIZE - 1)
 	msr	sp_el0, x2
 	/*
-	 * cpu_do_resume expects x0 to contain context physical address
-	 * pointer and x1 to contain physical address of 1:1 page tables
+	 * cpu_do_resume expects x0 to contain context address pointer
 	 */
-	bl	cpu_do_resume		// PC relative jump, MMU off
-	/* Can't access these by physical address once the MMU is on */
+	bl	cpu_do_resume
+
+#ifdef CONFIG_KASAN
+	mov	x0, sp
+	bl	kasan_unpoison_remaining_stack
+#endif
+
 	ldp	x19, x20, [x29, #16]
 	ldp	x21, x22, [x29, #32]
 	ldp	x23, x24, [x29, #48]
 	ldp	x25, x26, [x29, #64]
 	ldp	x27, x28, [x29, #80]
 	ldp	x29, lr, [x29]
-	b	cpu_resume_mmu		// Resume MMU, never returns
-ENDPROC(cpu_resume)
+	mov	x0, #0
+	ret
+ENDPROC(_cpu_resume)
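
For readers unfamiliar with the stash mechanism these hunks rely on: __cpu_suspend_enter now stores the (virtual) address of the suspending CPU's context area into a slot of sleep_save_stash selected by the hashed MPIDR (`str x0, [x1]` after `add x1, x1, x8, lsl #3`), and _cpu_resume reads the same slot back once __enable_mmu has turned the MMU on. Below is a minimal standalone C sketch of that store/lookup; the struct layout, hash function, and MAX_CPUS here are illustrative stand-ins, not the kernel's actual definitions.

#include <stdio.h>
#include <stdint.h>

#define MAX_CPUS 4	/* stand-in for the mpidr_hash table size */

/* Illustrative stand-in for the saved-context area (cpu_suspend_ctx). */
struct suspend_ctx {
	uint64_t sys_regs[14];
	uint64_t sp;
};

/* One pointer slot per possible CPU, indexed by hashed MPIDR. */
static struct suspend_ctx *sleep_save_stash[MAX_CPUS];

/* Toy hash: the real mpidr_hash applies per-affinity-level shifts and a mask. */
static unsigned int mpidr_hash(uint64_t mpidr)
{
	return mpidr & (MAX_CPUS - 1);
}

/* What __cpu_suspend_enter does before calling cpu_do_suspend. */
static void stash_context(uint64_t mpidr, struct suspend_ctx *ctx)
{
	/* str x0, [x1] -- C array indexing supplies the 'lsl #3' scaling. */
	sleep_save_stash[mpidr_hash(mpidr)] = ctx;
}

/* What _cpu_resume does after __enable_mmu, with the MMU already on. */
static struct suspend_ctx *find_context(uint64_t mpidr)
{
	return sleep_save_stash[mpidr_hash(mpidr)];	/* ldr x0, [x0, x7, lsl #3] */
}

int main(void)
{
	struct suspend_ctx ctx = { .sp = 0xffff000008000000ull };

	stash_context(1, &ctx);
	printf("restored sp = %#llx\n",
	       (unsigned long long)find_context(1)->sp);
	return 0;
}

Because the stash holds virtual addresses and is only touched with the MMU on, the resume path no longer needs the physical-address plumbing that cpu_resume_mmu provided, which is why the entire .idmap.text trampoline can be deleted above.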