/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

/*
 * Non-VHE: Both host and guest must save everything.
 *
 * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
 * pstate, which are handled as part of the el2 return state) on every
 * switch. tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU.  EL1 registers only need to be
 * switched when potentially going to run a different VCPU.  The latter two
 * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
 */

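/*
 * The three classes above map onto the helpers below:
 * __sysreg_{save,restore}_common_state() handles mdscr_el1 and sp_el0,
 * __sysreg_{save,restore}_user_state() handles tpidr_el0/tpidrro_el0,
 * and __sysreg_{save,restore}_el1_state() handles the remaining EL1
 * registers.
 */
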
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);

	/*
	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
	 * therefore be saved/restored on every entry/exit to/from the guest.
	 */
	ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
}

static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
	ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
}

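/*
 * Save the full EL1 context.  Note that MPIDR_EL1 is read back through
 * VMPIDR_EL2, which holds the MPIDR value the guest sees while running
 * under KVM.
 */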
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[MPIDR_EL1]      = read_sysreg(vmpidr_el2);
	ctxt->sys_regs[CSSELR_EL1]     = read_sysreg(csselr_el1);
	ctxt->sys_regs[SCTLR_EL1]      = read_sysreg_el1(sctlr);
	ctxt->sys_regs[ACTLR_EL1]      = read_sysreg(actlr_el1);
	ctxt->sys_regs[CPACR_EL1]      = read_sysreg_el1(cpacr);
	ctxt->sys_regs[TTBR0_EL1]      = read_sysreg_el1(ttbr0);
	ctxt->sys_regs[TTBR1_EL1]      = read_sysreg_el1(ttbr1);
	ctxt->sys_regs[TCR_EL1]        = read_sysreg_el1(tcr);
	ctxt->sys_regs[ESR_EL1]        = read_sysreg_el1(esr);
	ctxt->sys_regs[AFSR0_EL1]      = read_sysreg_el1(afsr0);
	ctxt->sys_regs[AFSR1_EL1]      = read_sysreg_el1(afsr1);
	ctxt->sys_regs[FAR_EL1]        = read_sysreg_el1(far);
	ctxt->sys_regs[MAIR_EL1]       = read_sysreg_el1(mair);
	ctxt->sys_regs[VBAR_EL1]       = read_sysreg_el1(vbar);
	ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr);
	ctxt->sys_regs[AMAIR_EL1]      = read_sysreg_el1(amair);
	ctxt->sys_regs[CNTKCTL_EL1]    = read_sysreg_el1(cntkctl);
	ctxt->sys_regs[PAR_EL1]        = read_sysreg(par_el1);
	ctxt->sys_regs[TPIDR_EL1]      = read_sysreg(tpidr_el1);

	ctxt->gp_regs.sp_el1             = read_sysreg(sp_el1);
	ctxt->gp_regs.elr_el1            = read_sysreg_el1(elr);
	ctxt->gp_regs.spsr[KVM_SPSR_EL1] = read_sysreg_el1(spsr);
}

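/*
 * Between the trap to EL2 and the next ERET, the guest's PC and pstate
 * live in ELR_EL2 and SPSR_EL2; capture them (plus the virtual DISR on
 * RAS-capable systems) as the "el2 return state" described in the
 * comment at the top of this file.
 */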
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
	ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}

void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_el1_state(ctxt);
	__sysreg_save_common_state(ctxt);
	__sysreg_save_user_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}

void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
}

void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}

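/*
 * A sketch of how a non-VHE world switch could pair the save/restore
 * helpers (hypothetical caller shown for illustration only; the real
 * sequencing lives in the hyp switch code, not in this file):
 *
 *	__sysreg_save_state_nvhe(host_ctxt);	  // stash host state
 *	__sysreg_restore_state_nvhe(guest_ctxt);  // install guest state
 *	... run the guest ...
 *	__sysreg_save_state_nvhe(guest_ctxt);	  // stash guest state
 *	__sysreg_restore_state_nvhe(host_ctxt);	  // re-install host state
 */
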
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);

	/*
	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
	 * therefore be saved/restored on every entry/exit to/from the guest.
	 */
	write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
}

static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
}

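/* Mirror image of __sysreg_save_el1_state(), in the same order. */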
static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
	write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr);
	write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr);
	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0);
	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1);
	write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr);
	write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr);
	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0);
	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1);
	write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far);
	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair);
	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], vbar);
	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr);
	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
	write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
	write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);

	write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
	write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr);
}

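/*
 * Program the EL2 return state (ELR_EL2/SPSR_EL2) so that the eventual
 * ERET resumes the guest at the saved PC and pstate; the safety check
 * below sanitises pstate values that would otherwise let the guest
 * enter at EL2 or above.
 */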
static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = ctxt->gp_regs.regs.pstate;
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest.  Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
	write_sysreg_el2(pstate, spsr);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}

void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_el1_state(ctxt);
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_user_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}

void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
}

void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}

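/*
 * AArch32 guests keep additional state in the banked SPSRs and in the
 * DACR32_EL2/IFSR32_EL2/DBGVCR32_EL2 views of the AArch32 DACR, IFSR
 * and DBGVCR registers; there is nothing to do for a 64-bit guest.
 * DBGVCR32_EL2 is only worth switching when the debug state is dirty
 * (it is always switched on VHE).
 */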
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (!vcpu_el1_is_32bit(vcpu))
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
	spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
	spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
	spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);

	sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
	sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);

	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}

void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (!vcpu_el1_is_32bit(vcpu))
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
	write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
	write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
	write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);

	write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
	write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);

	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}

/**
 * kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Load system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.  This function is called from KVM's vcpu_load() function
 * and loading system register state early avoids having to load them on
 * every entry to the VM.
 */
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

	if (!has_vhe())
		return;

	__sysreg_save_user_state(host_ctxt);

	/*
	 * Load guest EL1 and user state
	 *
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_user_state(guest_ctxt);
	__sysreg_restore_el1_state(guest_ctxt);

	vcpu->arch.sysregs_loaded_on_cpu = true;

	activate_traps_vhe_load(vcpu);
}

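/*
 * Illustrative pairing of the two hooks (hypothetical caller; the real
 * call sites are KVM's generic vcpu_load()/vcpu_put() paths):
 *
 *	kvm_vcpu_load_sysregs(vcpu);	// vcpu_load: move guest state to HW
 *	... any number of VM entries and exits ...
 *	kvm_vcpu_put_sysregs(vcpu);	// vcpu_put: save guest, restore host
 */
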
/**
 * kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Save guest system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.  This function is called from KVM's vcpu_put() function
 * and deferring saving system register state until we're no longer running
 * the VCPU avoids having to save them on every exit from the VM.
 */
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

	if (!has_vhe())
		return;

	deactivate_traps_vhe_put();

	__sysreg_save_el1_state(guest_ctxt);
	__sysreg_save_user_state(guest_ctxt);
	__sysreg32_save_state(vcpu);

	/* Restore host user state */
	__sysreg_restore_user_state(host_ctxt);

	vcpu->arch.sysregs_loaded_on_cpu = false;
}

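/*
 * Set SCTLR_EL2.DSSBS.  Per the Arm ARM, DSSBS selects the default
 * PSTATE.SSBS value on exception entry to EL2, so after this call EL2
 * runs with SSBS set, i.e. with Speculative Store Bypass permitted.
 */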
void __hyp_text __kvm_enable_ssbs(void)
{
	u64 tmp;

	asm volatile(
	"mrs	%0, sctlr_el2\n"
	"orr	%0, %0, %1\n"
	"msr	sctlr_el2, %0"
	: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
}