sysreg-sr.c

/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

/*
 * Non-VHE: Both host and guest must save everything.
 *
 * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
 * which are handled as part of the el2 return state) on every switch.
 * tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU. EL1 registers only need to be
 * switched when potentially going to run a different VCPU. The latter two
 * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
 */
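
/*
 * Rough split of responsibilities (the callers sit outside this file, in the
 * world-switch code and in kvm_arch_vcpu_load()/kvm_arch_vcpu_put()):
 *
 *   non-VHE: __sysreg_save_state_nvhe()/__sysreg_restore_state_nvhe() run on
 *            every guest entry/exit and move everything.
 *
 *   VHE:     kvm_vcpu_load_sysregs()/kvm_vcpu_put_sysregs() move the EL1 and
 *            user state once per vcpu_load()/vcpu_put(), and only the common
 *            and EL2 return state is handled on every switch, via
 *            sysreg_{save,restore}_{host,guest}_state_vhe().
 */
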
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);

        /*
         * The host arm64 Linux uses sp_el0 to point to 'current' and it must
         * therefore be saved/restored on every entry/exit to/from the guest.
         */
        ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
}
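
/*
 * EL0 thread-pointer registers: as noted in the comment at the top of the
 * file, these only need to change when returning to host userspace or when
 * switching to a different VCPU.
 */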
static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
        ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
}
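
/*
 * Full EL1 context. Note that the guest's view of MPIDR_EL1 is whatever the
 * host programmed into VMPIDR_EL2, which is why it is read back from there.
 */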
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
        ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
        ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
        ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
        ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(cpacr);
        ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(ttbr0);
        ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(ttbr1);
        ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(tcr);
        ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(esr);
        ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(afsr0);
        ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(afsr1);
        ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(far);
        ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(mair);
        ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(vbar);
        ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr);
        ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
        ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
        ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
        ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);

        ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
        ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
        ctxt->gp_regs.spsr[KVM_SPSR_EL1] = read_sysreg_el1(spsr);
}
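
/*
 * While running at EL2, the PC and PSTATE of the context we will ERET into
 * live in ELR_EL2/SPSR_EL2; with the RAS extension, the deferred SError
 * state is shadowed in VDISR_EL2 and stored as DISR_EL1.
 */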
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
        ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
        ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}
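
/* Non-VHE: the full state is moved on every entry to and exit from the guest. */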
void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_el1_state(ctxt);
        __sysreg_save_common_state(ctxt);
        __sysreg_save_user_state(ctxt);
        __sysreg_save_el2_return_state(ctxt);
}

void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);

void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_common_state(ctxt);
        __sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
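
/*
 * The restore helpers below mirror the save helpers above, writing the
 * in-memory context back into the CPU registers.
 */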
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);

        /*
         * The host arm64 Linux uses sp_el0 to point to 'current' and it must
         * therefore be saved/restored on every entry/exit to/from the guest.
         */
        write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
}

static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
        write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
}

static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
        write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
        write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr);
        write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
        write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr);
        write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0);
        write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1);
        write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr);
        write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr);
        write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0);
        write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1);
        write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far);
        write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair);
        write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], vbar);
        write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr);
        write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
        write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
        write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
        write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);

        write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
        write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
        write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr);
}

static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
        write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}

void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_el1_state(ctxt);
        __sysreg_restore_common_state(ctxt);
        __sysreg_restore_user_state(ctxt);
        __sysreg_restore_el2_return_state(ctxt);
}

void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);

void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_common_state(ctxt);
        __sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
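
/*
 * AArch32 guest state: the banked SPSRs, DACR32_EL2 and IFSR32_EL2 only
 * exist for 32-bit guests, so both helpers below return early for a 64-bit
 * VCPU. DBGVCR32_EL2 is only touched when running VHE or when the debug
 * state is dirty.
 */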
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
        u64 *spsr, *sysreg;

        if (!vcpu_el1_is_32bit(vcpu))
                return;

        spsr = vcpu->arch.ctxt.gp_regs.spsr;
        sysreg = vcpu->arch.ctxt.sys_regs;

        spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
        spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
        spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
        spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);

        sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
        sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);

        if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
                sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}

void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
        u64 *spsr, *sysreg;

        if (!vcpu_el1_is_32bit(vcpu))
                return;

        spsr = vcpu->arch.ctxt.gp_regs.spsr;
        sysreg = vcpu->arch.ctxt.sys_regs;

        write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
        write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
        write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
        write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);

        write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
        write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);

        if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
                write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}

/**
 * kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Load system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2. This function is called from KVM's vcpu_load() function
 * and loading system register state early avoids having to load them on
 * every entry to the VM.
 */
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

        if (!has_vhe())
                return;

        __sysreg_save_user_state(host_ctxt);

        /*
         * Load guest EL1 and user state
         *
         * We must restore the 32-bit state before the sysregs, thanks
         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_user_state(guest_ctxt);
        __sysreg_restore_el1_state(guest_ctxt);

        vcpu->arch.sysregs_loaded_on_cpu = true;

        activate_traps_vhe_load(vcpu);
}

/**
 * kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Save guest system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2. This function is called from KVM's vcpu_put() function
 * and deferring saving system register state until we're no longer running the
 * VCPU avoids having to save them on every exit from the VM.
 */
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

        if (!has_vhe())
                return;

        deactivate_traps_vhe_put();

        __sysreg_save_el1_state(guest_ctxt);
        __sysreg_save_user_state(guest_ctxt);
        __sysreg32_save_state(vcpu);

        /* Restore host user state */
        __sysreg_restore_user_state(host_ctxt);

        vcpu->arch.sysregs_loaded_on_cpu = false;
}
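
/*
 * Between kvm_vcpu_load_sysregs() and kvm_vcpu_put_sysregs() on a VHE system
 * the guest's EL1 and user state lives in the CPU registers rather than in
 * vcpu->arch.ctxt; sysregs_loaded_on_cpu is what tells the rest of KVM to
 * access the hardware registers instead of the in-memory copy while the VCPU
 * is loaded.
 */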