switch.c

/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>
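
/*
 * FP/SIMD trapping lives in CPTR_EL2.TFP without VHE and in
 * CPACR_EL1.FPEN with VHE. The helpers below return true when guest
 * FP/SIMD accesses are currently *not* trapped to EL2.
 *
 * hyp_alternate_select() (from asm/kvm_hyp.h) builds a small accessor
 * patched via the alternatives framework: roughly speaking, it returns
 * the first function, and is patched at boot to return the second when
 * the given CPU capability (here ARM64_HAS_VIRT_HOST_EXTN) is present.
 * That is why callers use the double-call form, e.g.
 * __fpsimd_is_enabled()().
 */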
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
        return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
        return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
                            __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
        return __fpsimd_is_enabled()();
}
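
/*
 * Architecture-specific trap setup: with VHE the traps are programmed
 * through CPACR_EL1 (trap trace accesses, disable FP/SIMD) and VBAR_EL1
 * is switched to the hyp vectors; without VHE the equivalent bits live
 * in CPTR_EL2.
 */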
static void __hyp_text __activate_traps_vhe(void)
{
        u64 val;

        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_FPEN;
        write_sysreg(val, cpacr_el1);

        write_sysreg(__kvm_hyp_vector, vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
        u64 val;

        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
        write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
                            __activate_traps_nvhe, __activate_traps_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
        u64 val;

        /*
         * We are about to set CPTR_EL2.TFP to trap all floating point
         * register accesses to EL2, however, the ARM ARM clearly states that
         * traps are only taken to EL2 if the operation would not otherwise
         * trap to EL1. Therefore, always make sure that for 32-bit guests,
         * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
         * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
         * it will cause an exception.
         */
        val = vcpu->arch.hcr_el2;
        if (!(val & HCR_RW) && system_supports_fpsimd()) {
                write_sysreg(1 << 30, fpexc32_el2);
                isb();
        }
        write_sysreg(val, hcr_el2);

        /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
        write_sysreg(1 << 15, hstr_el2);
        /*
         * Make sure we trap PMU access from EL0 to EL2. Also sanitize
         * PMSELR_EL0 to make sure it never contains the cycle
         * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
         * EL1 instead of being trapped to EL2.
         */
        write_sysreg(0, pmselr_el0);
        write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
        write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
        __activate_traps_arch()();
}
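
/*
 * Undo the above on the way back to the host: restore the host view of
 * HCR_EL2 and CPACR_EL1/CPTR_EL2, and with VHE point VBAR_EL1 back at
 * the kernel's own exception vectors.
 */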
static void __hyp_text __deactivate_traps_vhe(void)
{
        extern char vectors[];  /* kernel exception vectors */

        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
        write_sysreg(HCR_RW, hcr_el2);
        write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
                            __deactivate_traps_nvhe, __deactivate_traps_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
        /*
         * If we pended a virtual abort, preserve it until it gets
         * cleared. See D1.14.3 (Virtual Interrupts) for details, but
         * the crucial bit is "On taking a vSError interrupt,
         * HCR_EL2.VSE is cleared to 0."
         */
        if (vcpu->arch.hcr_el2 & HCR_VSE)
                vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

        __deactivate_traps_arch()();
        write_sysreg(0, hstr_el2);
        write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
        write_sysreg(0, pmuserenr_el0);
}

static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
        write_sysreg(0, vttbr_el2);
}

static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_save_state(vcpu);
        else
                __vgic_v2_save_state(vcpu);

        write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
        u64 val;

        val = read_sysreg(hcr_el2);
        val |= HCR_INT_OVERRIDE;
        val |= vcpu->arch.irq_lines;
        write_sysreg(val, hcr_el2);

        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_restore_state(vcpu);
        else
                __vgic_v2_restore_state(vcpu);
}
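
/*
 * __check_arm_834220()() evaluates to true only on CPUs carrying
 * Cortex-A57 erratum 834220, where HPFAR_EL2 cannot be trusted for some
 * stage 2 faults; __populate_fault_info() then recomputes the IPA with
 * an AT instruction instead (see the comment there).
 */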
static bool __hyp_text __true_value(void)
{
        return true;
}

static bool __hyp_text __false_value(void)
{
        return false;
}

static hyp_alternate_select(__check_arm_834220,
                            __false_value, __true_value,
                            ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
        u64 par, tmp;

        /*
         * Resolve the IPA the hard way using the guest VA.
         *
         * Stage-1 translation already validated the memory access
         * rights. As such, we can use the EL1 translation regime, and
         * don't have to distinguish between EL0 and EL1 access.
         *
         * We do need to save/restore PAR_EL1 though, as we haven't
         * saved the guest context yet, and we may return early...
         */
        par = read_sysreg(par_el1);
        asm volatile("at s1e1r, %0" : : "r" (far));
        isb();

        tmp = read_sysreg(par_el1);
        write_sysreg(par, par_el1);

        if (unlikely(tmp & 1))
                return false; /* Translation failed, back to guest */

        /* Convert PAR to HPFAR format */
        *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
        return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
        u64 esr = read_sysreg_el2(esr);
        u8 ec = ESR_ELx_EC(esr);
        u64 hpfar, far;

        vcpu->arch.fault.esr_el2 = esr;

        if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
                return true;

        far = read_sysreg_el2(far);

        /*
         * The HPFAR can be invalid if the stage 2 fault did not
         * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
         * bit is clear) and one of the two following cases is true:
         * 1. The fault was due to a permission fault
         * 2. The processor carries errata 834220
         *
         * Therefore, for all non S1PTW faults where we either have a
         * permission fault or the errata workaround is enabled, we
         * resolve the IPA using the AT instruction.
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
                hpfar = read_sysreg(hpfar_el2);
        }

        vcpu->arch.fault.far_el2 = far;
        vcpu->arch.fault.hpfar_el2 = hpfar;
        return true;
}
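
/*
 * Advance the guest PC past the instruction that just trapped, handling
 * the AArch32 IT state and 16/32-bit instruction width where needed.
 * Used once an access has been emulated at EL2 so the guest does not
 * replay it.
 */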
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
        *vcpu_pc(vcpu) = read_sysreg_el2(elr);

        if (vcpu_mode_is_32bit(vcpu)) {
                vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
                kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
        } else {
                *vcpu_pc(vcpu) += 4;
        }

        write_sysreg_el2(*vcpu_pc(vcpu), elr);
}
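
/*
 * The world switch itself: save the host state, install the guest's
 * traps, stage 2 MMU, vgic, timer, sysreg and debug state, enter the
 * guest via __guest_enter(), and undo everything in reverse order on
 * exit. The returned exit code is handled back on the host side.
 */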
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
        bool fp_enabled;
        u64 exit_code;

        vcpu = kern_hyp_va(vcpu);
        write_sysreg(vcpu, tpidr_el2);

        host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
        guest_ctxt = &vcpu->arch.ctxt;

        __sysreg_save_host_state(host_ctxt);
        __debug_cond_save_host_state(vcpu);

        __activate_traps(vcpu);
        __activate_vm(vcpu);

        __vgic_restore_state(vcpu);
        __timer_restore_state(vcpu);

        /*
         * We must restore the 32-bit state before the sysregs, thanks
         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_guest_state(guest_ctxt);
        __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

        /* Jump in the fire! */
again:
        exit_code = __guest_enter(vcpu, host_ctxt);
        /* And we're baaack! */

        /*
         * We're using the raw exception code in order to only process
         * the trap if no SError is pending. We will come back to the
         * same PC once the SError has been injected, and replay the
         * trapping instruction.
         */
        if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
                goto again;

        if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
            exit_code == ARM_EXCEPTION_TRAP) {
                bool valid;

                valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
                        kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
                        kvm_vcpu_dabt_isvalid(vcpu) &&
                        !kvm_vcpu_dabt_isextabt(vcpu) &&
                        !kvm_vcpu_dabt_iss1tw(vcpu);

                if (valid) {
                        int ret = __vgic_v2_perform_cpuif_access(vcpu);

                        if (ret == 1) {
                                __skip_instr(vcpu);
                                goto again;
                        }

                        if (ret == -1) {
                                /* Promote an illegal access to an SError */
                                __skip_instr(vcpu);
                                exit_code = ARM_EXCEPTION_EL1_SERROR;
                        }

                        /* 0 falls through to be handled out of EL2 */
                }
        }

        fp_enabled = __fpsimd_enabled();

        __sysreg_save_guest_state(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_save_state(vcpu);
        __vgic_save_state(vcpu);

        __deactivate_traps(vcpu);
        __deactivate_vm(vcpu);

        __sysreg_restore_host_state(host_ctxt);

        if (fp_enabled) {
                __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
                __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
        }

        __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
        __debug_cond_restore_host_state(vcpu);

        return exit_code;
}
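
/*
 * Hyp panic handling: if VTTBR_EL2 indicates we died in guest context,
 * tear the guest down first so the host state is sane again. Without
 * VHE the panic is funnelled through __hyp_do_panic, since the hyp text
 * cannot call panic() directly; with VHE the kernel runs at EL2 and
 * panic() is called as usual.
 */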
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
        unsigned long str_va;

        /*
         * Force the panic string to be loaded from the literal pool,
         * making sure it is a kernel address and not a PC-relative
         * reference.
         */
        asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

        __hyp_do_panic(str_va,
                       spsr, elr,
                       read_sysreg(esr_el2), read_sysreg_el2(far),
                       read_sysreg(hpfar_el2), par,
                       (void *)read_sysreg(tpidr_el2));
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
        panic(__hyp_panic_string,
              spsr, elr,
              read_sysreg_el2(esr), read_sysreg_el2(far),
              read_sysreg(hpfar_el2), par,
              (void *)read_sysreg(tpidr_el2));
}

static hyp_alternate_select(__hyp_call_panic,
                            __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

void __hyp_text __noreturn __hyp_panic(void)
{
        u64 spsr = read_sysreg_el2(spsr);
        u64 elr = read_sysreg_el2(elr);
        u64 par = read_sysreg(par_el1);

        if (read_sysreg(vttbr_el2)) {
                struct kvm_vcpu *vcpu;
                struct kvm_cpu_context *host_ctxt;

                vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
                host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
                __deactivate_traps(vcpu);
                __deactivate_vm(vcpu);
                __sysreg_restore_host_state(host_ctxt);
        }

        /* Call panic for real */
        __hyp_call_panic()(spsr, elr, par);
        unreachable();
}