hyp-entry.S

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>

        .arch_extension virt

        .text
        .pushsection    .hyp.text, "ax"
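
/*
 * Annotation (not in the original): the world-switch entry code stashes
 * the current VCPU pointer in HTPIDR (the Hyp software thread ID
 * register), so any Hyp handler can recover it with a single MRC. The
 * load_vcpu macro below is that accessor.
 */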
.macro load_vcpu reg
        mrc     p15, 4, \reg, c13, c0, 2        @ HTPIDR
.endm
/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued, since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is issued from SVC mode (i.e. a guest or the
 * host kernel), and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are issued from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */
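
/*
 * For illustration only (not part of the original file): under the ABI
 * above, the host-side trampoline that enters Hyp mode is essentially a
 * bare HVC. A minimal sketch, assuming the C caller has already placed
 * the function pointer and arguments in r0-r3 per the AAPCS:
 *
 *      ENTRY(kvm_call_hyp)
 *              hvc     #0      @ trap to hyp_hvc; r0 = HYP function,
 *              bx      lr      @ r1-r3 = its arguments
 *      ENDPROC(kvm_call_hyp)
 */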
        .align 5
__kvm_hyp_vector:
        .global __kvm_hyp_vector

        @ Hyp-mode exception vector
        W(b)    hyp_reset
        W(b)    hyp_undef
        W(b)    hyp_svc
        W(b)    hyp_pabt
        W(b)    hyp_dabt
        W(b)    hyp_hvc
        W(b)    hyp_irq
        W(b)    hyp_fiq

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
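/*
 * Annotation (not in the original): on CPUs affected by Spectre-v2
 * (CVE-2017-5715), the branch predictor must be invalidated on every
 * exception entry from a guest. Two hardened vector variants exist
 * because the invalidation instruction differs per core:
 * __kvm_hyp_vector_ic_inv uses ICIALLU (e.g. Cortex-A15, where I-cache
 * invalidation also invalidates the predictor), while
 * __kvm_hyp_vector_bp_inv uses BPIALL (e.g. Cortex-A12/A17).
 */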
        .align 5
__kvm_hyp_vector_ic_inv:
        .global __kvm_hyp_vector_ic_inv

        /*
         * We encode the exception entry in the bottom 3 bits of
         * SP, which must therefore be 8-byte aligned on entry.
         */
        W(add)  sp, sp, #1      /* Reset          7 */
        W(add)  sp, sp, #1      /* Undef          6 */
        W(add)  sp, sp, #1      /* Syscall        5 */
        W(add)  sp, sp, #1      /* Prefetch abort 4 */
        W(add)  sp, sp, #1      /* Data abort     3 */
        W(add)  sp, sp, #1      /* HVC            2 */
        W(add)  sp, sp, #1      /* IRQ            1 */
        W(nop)                  /* FIQ            0 */

        mcr     p15, 0, r0, c7, c5, 0   /* ICIALLU */
        isb

        b       decode_vectors
        .align 5
__kvm_hyp_vector_bp_inv:
        .global __kvm_hyp_vector_bp_inv

        /*
         * We encode the exception entry in the bottom 3 bits of
         * SP, which must therefore be 8-byte aligned on entry.
         */
        W(add)  sp, sp, #1      /* Reset          7 */
        W(add)  sp, sp, #1      /* Undef          6 */
        W(add)  sp, sp, #1      /* Syscall        5 */
        W(add)  sp, sp, #1      /* Prefetch abort 4 */
        W(add)  sp, sp, #1      /* Data abort     3 */
        W(add)  sp, sp, #1      /* HVC            2 */
        W(add)  sp, sp, #1      /* IRQ            1 */
        W(nop)                  /* FIQ            0 */

        mcr     p15, 0, r0, c7, c5, 6   /* BPIALL */
        isb
decode_vectors:

#ifdef CONFIG_THUMB2_KERNEL
        /*
         * Yet another silly hack: Use VPIDR as a temp register.
         * Thumb2 is really a pain, as SP cannot be used with most
         * of the bitwise instructions. The vect_br macro ensures
         * things get cleaned up.
         */
        mcr     p15, 4, r0, c0, c0, 0   /* VPIDR */
        mov     r0, sp
        and     r0, r0, #7
        sub     sp, sp, r0
        push    {r1, r2}
        mov     r1, r0
        mrc     p15, 4, r0, c0, c0, 0   /* VPIDR */
        mrc     p15, 0, r2, c0, c0, 0   /* MIDR */
        mcr     p15, 4, r2, c0, c0, 0   /* VPIDR */
#endif
.macro vect_br val, targ
ARM(    eor     sp, sp, #\val   )
ARM(    tst     sp, #7          )
ARM(    eorne   sp, sp, #\val   )

THUMB(  cmp     r1, #\val       )
THUMB(  popeq   {r1, r2}        )

        beq     \targ
.endm
        vect_br 0, hyp_fiq
        vect_br 1, hyp_irq
        vect_br 2, hyp_hvc
        vect_br 3, hyp_dabt
        vect_br 4, hyp_pabt
        vect_br 5, hyp_svc
        vect_br 6, hyp_undef
        vect_br 7, hyp_reset
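
/*
 * Worked example (annotation, not in the original): an IRQ taken through
 * the hardened vectors enters at the 7th slot, executes a single
 * "add sp, sp, #1" plus the trailing nop, and reaches decode_vectors with
 * SP & 7 == 1. "vect_br 1, hyp_irq" then EORs that value back out of SP
 * (restoring it) and branches to hyp_irq; every non-matching vect_br
 * leaves SP unchanged, since its eorne undoes the speculative eor.
 */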
#endif
.macro invalid_vector label, cause
        .align
\label: mov     r0, #\cause
        b       __hyp_panic
.endm

        invalid_vector  hyp_reset       ARM_EXCEPTION_RESET
        invalid_vector  hyp_undef       ARM_EXCEPTION_UNDEFINED
        invalid_vector  hyp_svc         ARM_EXCEPTION_SOFTWARE
        invalid_vector  hyp_pabt        ARM_EXCEPTION_PREF_ABORT
        invalid_vector  hyp_fiq         ARM_EXCEPTION_FIQ
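
/*
 * Annotation (not in the original): __hyp_do_panic fakes an exception
 * return into the host. It rewrites the saved PSR so that ERET drops us
 * into SVC mode (with the Thumb bit set on Thumb-2 kernels), points
 * ELR_hyp at panic(), and preloads LR with kvm_call_hyp so the host
 * backtrace shows where Hyp was entered from. clrex drops any exclusive
 * monitor state before the mode change.
 */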
ENTRY(__hyp_do_panic)
        mrs     lr, cpsr
        bic     lr, lr, #MODE_MASK
        orr     lr, lr, #SVC_MODE
THUMB(  orr     lr, lr, #PSR_T_BIT      )
        msr     spsr_cxsf, lr
        ldr     lr, =panic
        msr     ELR_hyp, lr
        ldr     lr, =kvm_call_hyp
        clrex
        eret
ENDPROC(__hyp_do_panic)
hyp_hvc:
        /*
         * Getting here is either because of a trap from a guest,
         * or from executing HVC from the host kernel, which means
         * "do something in Hyp mode".
         */
        push    {r0, r1, r2}

        @ Check syndrome register
        mrc     p15, 4, r1, c5, c2, 0   @ HSR
        lsr     r0, r1, #HSR_EC_SHIFT
        cmp     r0, #HSR_EC_HVC
        bne     guest_trap              @ Not HVC instr.

        /*
         * Let's check if the HVC came from VMID 0 and allow a simple
         * switch to Hyp mode.
         */
        mrrc    p15, 6, r0, r2, c2      @ Read VTTBR; r2 holds the high word
        lsr     r2, r2, #16
        and     r2, r2, #0xff           @ VMID lives in VTTBR[55:48]
        cmp     r2, #0
        bne     guest_hvc_trap          @ Guest called HVC

        /*
         * Getting here means the host called HVC, so we shift the
         * parameters down and branch to the Hyp function.
         */
        pop     {r0, r1, r2}

        /*
         * Check whether r0 holds a kernel function pointer, which is
         * guaranteed to be bigger than the maximum hyp stub hypercall
         * number.
         */
        cmp     r0, #HVC_STUB_HCALL_NR
        bhs     1f

        /*
         * Not a kernel function, treat it as a stub hypercall.
         * Compute the physical address for __kvm_handle_stub_hvc
         * (as the code lives in the idmapped page) and branch there.
         * We hijack ip (r12) as a tmp register.
         */
        push    {r1}
        ldr     r1, =kimage_voffset
        ldr     r1, [r1]                @ r1 = kernel VA - PA offset
        ldr     ip, =__kvm_handle_stub_hvc
        sub     ip, ip, r1              @ convert to the idmapped address
        pop     {r1}
        bx      ip
1:
        /*
         * Pushing r2 here is just a way of keeping the stack aligned to
         * 8 bytes on any path that can trigger a HYP exception. Here,
         * we may well be about to jump into the guest, and the guest
         * exit would otherwise be badly decoded by our fancy
         * "decode-exception-without-a-branch" code...
         */
        push    {r2, lr}

        mov     lr, r0
        mov     r0, r1
        mov     r1, r2
        mov     r2, r3

THUMB(  orr     lr, #1)
        blx     lr                      @ Call the HYP function

        pop     {r2, lr}
        eret
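
/*
 * Annotation (not in the original): a guest issuing the SMCCC
 * ARM_SMCCC_ARCH_WORKAROUND_1 call gets a fast path here. The branch
 * predictor was already invalidated by the hardened vector stubs on
 * entry, so all that remains is to return 0 to the guest without paying
 * for a full world switch.
 */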
guest_hvc_trap:
        movw    r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
        movt    r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
        ldr     r0, [sp]                @ Guest's r0
        teq     r0, r2
        bne     guest_trap
        add     sp, sp, #12             @ drop the saved {r0, r1, r2}
        @ Returns:
        @ r0 = 0
        @ r1 = HSR value (perfectly predictable)
        @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
        mov     r0, #0
        eret
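
/*
 * Annotation (not in the original): everything else is a genuine guest
 * exit. One shortcut remains: a trapped VFP/NEON access (HSR_EC_CP_0_13)
 * is handled by lazily restoring the guest FP state and resuming the
 * guest directly, without going through __guest_exit.
 */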
guest_trap:
        load_vcpu r0                    @ Load VCPU pointer to r0

#ifdef CONFIG_VFPv3
        @ Check for a VFP access
        lsr     r1, r1, #HSR_EC_SHIFT
        cmp     r1, #HSR_EC_CP_0_13
        beq     __vfp_guest_restore
#endif

        mov     r1, #ARM_EXCEPTION_HVC
        b       __guest_exit
hyp_irq:
        push    {r0, r1, r2}
        mov     r1, #ARM_EXCEPTION_IRQ
        load_vcpu r0                    @ Load VCPU pointer to r0
        b       __guest_exit
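
/*
 * Annotation (not in the original): a data abort taken in Hyp mode is
 * fatal unless ELR_hyp matches one of the two fixup labels
 * (abort_guest_exit_start/abort_guest_exit_end) that bracket the spot in
 * the world-switch code where a pending guest abort is allowed to fire.
 * In that case the exit code in r0 is tagged with ARM_EXIT_WITH_ABORT_BIT
 * and we ERET; anything else panics. The THUMB(+1) adjustments account
 * for the Thumb bit in the label addresses compared against ELR_hyp.
 */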
hyp_dabt:
        push    {r0, r1}
        mrs     r0, ELR_hyp
        ldr     r1, =abort_guest_exit_start
THUMB(  add     r1, r1, #1)
        cmp     r0, r1
        ldrne   r1, =abort_guest_exit_end
THUMB(  addne   r1, r1, #1)
        cmpne   r0, r1
        pop     {r0, r1}
        bne     __hyp_panic

        orr     r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
        eret
        .ltorg

        .popsection