hyp-entry.S

/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
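
/*
 * Presumed caller: with VHE the host kernel already runs at EL2, so the
 * hyp call does not need an HVC and __kvm_call_hyp is assumed to branch
 * straight here instead. x0 carries the function pointer and x1-x3 its
 * arguments, matching do_el2_call above.
 */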
ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2
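	/*
	 * The cmp/ccmp pair below treats the 64-bit and 32-bit HVC
	 * exception classes as equivalent: ccmp only performs the second
	 * comparison when the first one failed ("ne"), and otherwise
	 * forces the flags to #4 (Z set), so a single b.ne rejects
	 * everything that is not an HVC.
	 */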
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
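	/*
	 * (clz of zero is 32; the lsr #5 therefore yields 1 only when
	 * the guest's x1 was zero, and the final eor flips that, leaving
	 * w1 = 1 exactly when the original x1 was non-zero.)
	 */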
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr

#endif

wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
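
/*
 * The three handlers below share one convention: get_vcpu_ptr loads the
 * running vcpu pointer into x1 (clobbering x0), the exit code goes into
 * x0, and __guest_exit is expected to save the guest context and return
 * to the host with that code.
 */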
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for an illegal exception return, otherwise panic */
	mrs	x0, spsr_el2

	/* If this wasn't an illegal exception return, then panic! */
	tst	x0, #PSR_IL_BIT
	b.eq	__hyp_panic

	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit

el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
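	/*
	 * Case (1) is detected by checking that elr_el2 points at one of
	 * the two abort_guest_exit_* labels, which are assumed to bracket
	 * the spot in the exit path where PSTATE.A gets unmasked; the
	 * ccmp accepts either label, so a single b.ne panics on anything
	 * else.
	 */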
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt	x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b	\target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align	11

.macro valid_vect target
	.align	7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

.macro invalid_vect target
	.align	7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm
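
/*
 * The trailing ldp/b in invalid_vect looks unreachable, but it is used by
 * the indirect (hardened) vectors below: those enter each slot four bytes
 * in, skipping the first branch, after hyp_ventry has already pushed
 * x0/x1, so the registers have to be popped before heading to the invalid
 * handler.
 */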

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align	7
1:	.rept	27
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept	16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm
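
/*
 * Sizing check: each hyp_ventry entry is .align 7, i.e. 128 bytes (the 27
 * nops plus the 5-instruction patched sequence fill it exactly), so 16 of
 * them form one complete 2KB vector table, which is what the .org
 * directive above enforces.
 */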

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept	BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection
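
/*
 * The start/end labels below delimit a self-contained sequence (note the
 * absence of a ret): it preserves x0-x3 across an ARM_SMCCC_ARCH_WORKAROUND_1
 * SMC and then falls through, presumably so that it can be copied verbatim
 * into the nop area at the head of each hardened vector entry above.
 */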
ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif