@@ -152,8 +152,25 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 static void __hyp_text
 __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 {
+	u64 pstate = ctxt->gp_regs.regs.pstate;
+	u64 mode = pstate & PSR_AA32_MODE_MASK;
+
+	/*
+	 * Safety check to ensure we're setting the CPU up to enter the guest
+	 * in a less privileged mode.
+	 *
+	 * If we are attempting a return to EL2 or higher in AArch64 state,
+	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
+	 * we'll take an illegal exception state exception immediately after
+	 * the ERET to the guest. Attempts to return to AArch32 Hyp will
+	 * result in an illegal exception return because EL2's execution state
+	 * is determined by SCR_EL3.RW.
+	 */
+	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
+		pstate = PSR_MODE_EL2h | PSR_IL_BIT;
+
 	write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
-	write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+	write_sysreg_el2(pstate, spsr);
 
 	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
 		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);