switch.c

/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

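/* Undo the cp15/PMU trap configuration set up by __activate_traps_common() */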
static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

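/*
 * VHE guest entry: program CPACR_EL1 to trap trace and SVE accesses, trap
 * FP/SIMD unless the guest's FP state is already resident, and install the
 * hyp exception vectors.
 */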
static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_ZEN;
	if (!update_fp_enabled(vcpu)) {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

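/*
 * Non-VHE counterpart: program CPTR_EL2 so that trace, SVE and (if the
 * guest's FP state is not resident) FP/SIMD accesses trap to EL2.
 */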
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
}

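/*
 * Install the guest's HCR_EL2 (and VSESR_EL2 when a virtual SError is
 * pending on a RAS-capable CPU), then apply the VHE or non-VHE trap setup.
 */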
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}

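/* VHE: restore the host's HCR_EL2, CPACR_EL1 and exception vectors */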
static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

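/*
 * Non-VHE counterpart: restore the host's MDCR_EL2, HCR_EL2 and CPTR_EL2
 * defaults before returning to the host.
 */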
static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}

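/* VHE only: install the common cp15/PMU traps when a vcpu is loaded on a CPU */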
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

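/*
 * VHE only: counterpart of activate_traps_vhe_load(), restoring the host's
 * MDCR_EL2 and dropping the common traps when the vcpu is put.
 */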
void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

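/* Point VTTBR_EL2 at the guest's stage-2 page tables (and VMID) */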
static void __hyp_text __activate_vm(struct kvm *kvm)
{
	__load_guest_stage2(kvm);
}

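/* Detach from the guest's stage-2 translation by zeroing VTTBR_EL2 */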
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

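/*
 * __check_arm_834220() is patched at boot via hyp_alternate_select(): it
 * returns true only on CPUs affected by erratum 834220, where HPFAR_EL2 may
 * be invalid and the IPA has to be recomputed with an AT instruction.
 */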
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/* Skip an instruction which has been emulated. Returns true if
 * execution can continue or false if we need to exit hyp mode because
 * single-step was in effect.
 */
static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		vcpu->arch.fault.esr_el2 =
			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
		return false;
	} else {
		return true;
	}
}

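/*
 * Lazily switch the FP/SIMD (and, where applicable, SVE) state on the first
 * trapped FP access: re-enable FP for the guest, save the host's state if it
 * is still live, and restore the guest's FPSIMD registers.
 */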
static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
{
	struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;

	if (has_vhe())
		write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
			     cpacr_el1);
	else
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);

	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (system_supports_sve() &&
		    (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) {
			struct thread_struct *thread = container_of(
				host_fpsimd,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr);
		} else {
			__fpsimd_save_state(host_fpsimd);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
	if (system_supports_fpsimd() &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD)
		return __hyp_switch_fpsimd(vcpu);

	if (!__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1 && __skip_instr(vcpu))
				return true;

			if (ret == -1) {
				/* Promote an illegal access to an
				 * SError. If we would be returning
				 * due to single-step clear the SS
				 * bit so handle_exit knows what to
				 * do after dealing with the error.
				 */
				if (!__skip_instr(vcpu))
					*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1 && __skip_instr(vcpu))
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}

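/*
 * True when the SSBD (Speculative Store Bypass Disable) mitigation is present
 * on this CPU but the guest has asked for it to be turned off.
 */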
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	__activate_traps(vcpu);
	__activate_vm(vcpu->kvm);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state_nvhe(host_ctxt);

	__activate_traps(vcpu);
	__activate_vm(kern_hyp_va(vcpu->kvm));

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

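/*
 * Non-VHE panic path: tear down the guest context if one is active, then
 * hand over to __hyp_do_panic() with the host state restored.
 */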
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}

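/* VHE panic path: restore the host state and call panic() directly */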
static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;

	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}

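/*
 * Hyp panic entry point: snapshot SPSR_EL2, ELR_EL2 and PAR_EL1, then take
 * the VHE or non-VHE panic path.
 */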
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}