interrupts.S

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"

        .text

__kvm_hyp_code_start:
        .globl __kvm_hyp_code_start

/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI based mechanism.  Until then, we stick to the simple
 * hardware assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 */
ENTRY(__kvm_tlb_flush_vmid_ipa)
        push    {r2, r3}

        dsb     ishst
        add     r0, r0, #KVM_VTTBR
        ldrd    r2, r3, [r0]
        mcrr    p15, 6, rr_lo_hi(r2, r3), c2    @ Write VTTBR
        isb
        mcr     p15, 0, r0, c8, c3, 0           @ TLBIALLIS (rt ignored)
        dsb     ish
        isb
        mov     r2, #0
        mov     r3, #0
        mcrr    p15, 6, r2, r3, c2              @ Back to VMID #0
        isb                                     @ Not necessary if followed by eret

        pop     {r2, r3}
        bx      lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
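
/*
 * Typical call site (a sketch; in this tree the caller is the C side of
 * KVM/ARM, e.g. kvm_tlb_flush_vmid_ipa() in arch/arm/kvm/mmu.c):
 *
 *      kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *
 * kvm_call_hyp() (see below) issues HVC #0 so that this code runs in Hyp
 * mode with kvm in r0; the ipa value is ignored here, as noted above.
 */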
/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 *
 * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing the address
 * parameter.
 */
ENTRY(__kvm_tlb_flush_vmid)
        b       __kvm_tlb_flush_vmid_ipa
ENDPROC(__kvm_tlb_flush_vmid)

/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 */
ENTRY(__kvm_flush_vm_context)
        mov     r0, #0                          @ rn parameter for c15 flushes is SBZ

        /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
        mcr     p15, 4, r0, c8, c3, 4
        /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
        mcr     p15, 0, r0, c7, c1, 0
        dsb     ish
        isb                                     @ Not necessary if followed by eret

        bx      lr
ENDPROC(__kvm_flush_vm_context)

/********************************************************************
 * Hypervisor world-switch code
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 */
ENTRY(__kvm_vcpu_run)
        @ Save the vcpu pointer
        mcr     p15, 4, vcpu, c13, c0, 2        @ HTPIDR

        save_host_regs

        restore_vgic_state
        restore_timer_state

        @ Store hardware CP15 state and load guest state
        read_cp15_state store_to_vcpu = 0
        write_cp15_state read_from_vcpu = 1

        @ If the host kernel has not been configured with VFPv3 support,
        @ then it is safer if we deny guests from using it as well.
#ifdef CONFIG_VFPv3
        @ Set FPEXC_EN so the guest doesn't trap floating point instructions
        VFPFMRX r2, FPEXC                       @ VMRS
        push    {r2}
        orr     r2, r2, #FPEXC_EN
        VFPFMXR FPEXC, r2                       @ VMSR
#endif

        @ Configure Hyp-role
        configure_hyp_role vmentry

        @ Trap coprocessor CRx accesses
        set_hstr vmentry
        set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
        set_hdcr vmentry

        @ Write configured ID register into MIDR alias
        ldr     r1, [vcpu, #VCPU_MIDR]
        mcr     p15, 4, r1, c0, c0, 0

        @ Write guest view of MPIDR into VMPIDR
        ldr     r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
        mcr     p15, 4, r1, c0, c0, 5

        @ Set up guest memory translation
        ldr     r1, [vcpu, #VCPU_KVM]
        add     r1, r1, #KVM_VTTBR
        ldrd    r2, r3, [r1]
        mcrr    p15, 6, rr_lo_hi(r2, r3), c2    @ Write VTTBR

        @ We're all done, just restore the GPRs and go to the guest
        restore_guest_regs
        clrex                                   @ Clear exclusive monitor
        eret

__kvm_vcpu_return:
        /*
         * return convention:
         * guest r0, r1, r2 saved on the stack
         * r0: vcpu pointer
         * r1: exception code
         */
        save_guest_regs

        @ Set VMID == 0
        mov     r2, #0
        mov     r3, #0
        mcrr    p15, 6, r2, r3, c2              @ Write VTTBR

        @ Don't trap coprocessor accesses for host kernel
        set_hstr vmexit
        set_hdcr vmexit
        set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore

#ifdef CONFIG_VFPv3
        @ Switch VFP/NEON hardware state to the host's
        add     r7, vcpu, #VCPU_VFP_GUEST
        store_vfp_state r7
        add     r7, vcpu, #VCPU_VFP_HOST
        ldr     r7, [r7]
        restore_vfp_state r7
after_vfp_restore:
        @ Restore FPEXC_EN which we clobbered on entry
        pop     {r2}
        VFPFMXR FPEXC, r2
#else
after_vfp_restore:
#endif

        @ Reset Hyp-role
        configure_hyp_role vmexit

        @ Let host read hardware MIDR
        mrc     p15, 0, r2, c0, c0, 0
        mcr     p15, 4, r2, c0, c0, 0

        @ Back to hardware MPIDR
        mrc     p15, 0, r2, c0, c0, 5
        mcr     p15, 4, r2, c0, c0, 5

        @ Store guest CP15 state and restore host state
        read_cp15_state store_to_vcpu = 1
        write_cp15_state read_from_vcpu = 0

        save_timer_state
        save_vgic_state

        restore_host_regs
        clrex                                   @ Clear exclusive monitor
#ifndef CONFIG_CPU_ENDIAN_BE8
        mov     r0, r1                          @ Return the return code
        mov     r1, #0                          @ Clear upper bits in return value
#else
        @ r1 already has return code
        mov     r0, #0                          @ Clear upper bits in return value
#endif /* CONFIG_CPU_ENDIAN_BE8 */
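        /*
         * Design note (describing the code above, not adding behaviour):
         * the u64 return value of kvm_call_hyp() lives in the r0/r1
         * register pair.  On little-endian the low word is r0, so the
         * exception code is moved there and r1 cleared; on BE8 the low
         * word is r1, which already holds the code, so only r0 (the
         * high word) needs clearing.
         */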
        bx      lr                              @ return to IOCTL

/********************************************************************
 * Call function in Hyp mode
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * A function pointer with a value of 0xffffffff has a special meaning,
 * and is used to implement __hyp_get_vectors in the same way as in
 * arch/arm/kernel/hyp_stub.S.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
ENTRY(kvm_call_hyp)
        hvc     #0
        bx      lr
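
/*
 * Illustrative call sites (a sketch; the real callers live in the C side
 * of KVM/ARM, e.g. arch/arm/kvm/arm.c):
 *
 *      kvm_call_hyp(__kvm_flush_vm_context);
 *      ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * The HVC #0 traps to hyp_hvc below, which shifts the function pointer
 * out of r0, moves the remaining arguments down one register each, and
 * branches to the HYP-mapped function.
 */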
/********************************************************************
 * Hypervisor exception vector and handlers
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction.  The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */
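
/*
 * A minimal register-level trace of the ABI above (illustrative only;
 * the function pointer and argument values are hypothetical):
 *
 *      host (SVC):  r0 = &__kvm_flush_vm_context, r1-r3 = args
 *                   hvc #0          -> vector page + 0x14 (hyp_hvc)
 *      hyp_hvc:     r0 <- r1, r1 <- r2, r2 <- r3, blx <old r0>
 *      HYP fn:      runs with its args in r0-r2, returns in r0/r1
 *                   eret            -> back to SVC, after the HVC
 */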

/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
.macro bad_exception exception_code, panic_str
        push    {r0-r2}
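        /*
         * Descriptive note: the VMID lives in VTTBR[55:48], i.e. bits
         * [23:16] of the high word read into r1 below.  VMID == 0 means
         * the exception was taken while running the host, not a guest.
         */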
        mrrc    p15, 6, r0, r1, c2              @ Read VTTBR
        lsr     r1, r1, #16
        ands    r1, r1, #0xff
        beq     99f

        load_vcpu                               @ Load VCPU pointer
        .if \exception_code == ARM_EXCEPTION_DATA_ABORT
        mrc     p15, 4, r2, c5, c2, 0           @ HSR
        mrc     p15, 4, r1, c6, c0, 0           @ HDFAR
        str     r2, [vcpu, #VCPU_HSR]
        str     r1, [vcpu, #VCPU_HxFAR]
        .endif
        .if \exception_code == ARM_EXCEPTION_PREF_ABORT
        mrc     p15, 4, r2, c5, c2, 0           @ HSR
        mrc     p15, 4, r1, c6, c0, 2           @ HIFAR
        str     r2, [vcpu, #VCPU_HSR]
        str     r1, [vcpu, #VCPU_HxFAR]
        .endif
        mov     r1, #\exception_code
        b       __kvm_vcpu_return

        @ We were in the host already.  Let's craft a panicking return to SVC.
99:     mrs     r2, cpsr
        bic     r2, r2, #MODE_MASK
        orr     r2, r2, #SVC_MODE
THUMB(  orr     r2, r2, #PSR_T_BIT      )
        msr     spsr_cxsf, r2
        mrs     r1, ELR_hyp
        ldr     r2, =panic
        msr     ELR_hyp, r2
        ldr     r0, =\panic_str
        clrex                                   @ Clear exclusive monitor
        eret
.endm

        .text

        .align 5
__kvm_hyp_vector:
        .globl __kvm_hyp_vector

        @ Hyp-mode exception vector
        W(b)    hyp_reset
        W(b)    hyp_undef
        W(b)    hyp_svc
        W(b)    hyp_pabt
        W(b)    hyp_dabt
        W(b)    hyp_hvc
        W(b)    hyp_irq
        W(b)    hyp_fiq
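        /*
         * Descriptive note: each W(b) above is one 4-byte vector slot:
         * 0x00 reset, 0x04 undef, 0x08 HVC-from-Hyp, 0x0c pabt, 0x10
         * dabt, 0x14 HVC-from-guest/host, 0x18 IRQ, 0x1c FIQ -- matching
         * the offsets described in the ABI comment above.
         */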

        .align
hyp_reset:
        b       hyp_reset

        .align
hyp_undef:
        bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

        .align
hyp_svc:
        bad_exception ARM_EXCEPTION_HVC, svc_die_str

        .align
hyp_pabt:
        bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

        .align
hyp_dabt:
        bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str

        .align
hyp_hvc:
        /*
         * Getting here is either because of a trap from a guest or from
         * calling HVC from the host kernel, which means "switch to Hyp mode".
         */
        push    {r0, r1, r2}

        @ Check syndrome register
        mrc     p15, 4, r1, c5, c2, 0           @ HSR
        lsr     r0, r1, #HSR_EC_SHIFT
        cmp     r0, #HSR_EC_HVC
        bne     guest_trap                      @ Not HVC instr.

        /*
         * Let's check if the HVC came from VMID 0 and allow simple
         * switch to Hyp mode
         */
        mrrc    p15, 6, r0, r2, c2
        lsr     r2, r2, #16
        and     r2, r2, #0xff
        cmp     r2, #0
        bne     guest_trap                      @ Guest called HVC

        /*
         * Getting here means host called HVC, we shift parameters and branch
         * to Hyp function.
         */
        pop     {r0, r1, r2}

        /* Check for __hyp_get_vectors */
        cmp     r0, #-1
        mrceq   p15, 4, r0, c12, c0, 0          @ get HVBAR
        beq     1f

        push    {lr}
        mrs     lr, SPSR
        push    {lr}
        mov     lr, r0
        mov     r0, r1
        mov     r1, r2
        mov     r2, r3
THUMB(  orr     lr, #1)
        blx     lr                              @ Call the HYP function
        pop     {lr}
        msr     SPSR_csxf, lr
        pop     {lr}
1:      eret

guest_trap:
        load_vcpu                               @ Load VCPU pointer to r0
        str     r1, [vcpu, #VCPU_HSR]

        @ Check if we need the fault information
        lsr     r1, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
        cmp     r1, #HSR_EC_CP_0_13
        beq     switch_to_guest_vfp
#endif
        cmp     r1, #HSR_EC_IABT
        mrceq   p15, 4, r2, c6, c0, 2           @ HIFAR
        beq     2f

        cmp     r1, #HSR_EC_DABT
        bne     1f
        mrc     p15, 4, r2, c6, c0, 0           @ HDFAR

2:      str     r2, [vcpu, #VCPU_HxFAR]

        /*
         * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
         *
         * Abort on the stage 2 translation for a memory access from a
         * Non-secure PL1 or PL0 mode:
         *
         * For any Access flag fault or Translation fault, and also for any
         * Permission fault on the stage 2 translation of a memory access
         * made as part of a translation table walk for a stage 1 translation,
         * the HPFAR holds the IPA that caused the fault.  Otherwise, the HPFAR
         * is UNKNOWN.
         */

        /* Check for permission fault, and S1PTW */
        mrc     p15, 4, r1, c5, c2, 0           @ HSR
        and     r0, r1, #HSR_FSC_TYPE
        cmp     r0, #FSC_PERM
        tsteq   r1, #(1 << 7)                   @ S1PTW
        mrcne   p15, 4, r2, c6, c0, 4           @ HPFAR
        bne     3f

        /* Preserve PAR */
        mrrc    p15, 0, r0, r1, c7              @ PAR
        push    {r0, r1}

        /* Resolve IPA using the xFAR */
        mcr     p15, 0, r2, c7, c8, 0           @ ATS1CPR
        isb
        mrrc    p15, 0, r0, r1, c7              @ PAR
        tst     r0, #1
        bne     4f                              @ Failed translation
        ubfx    r2, r0, #12, #20
        lsl     r2, r2, #4
        orr     r2, r2, r1, lsl #24
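        /*
         * r2 now holds the IPA in HPFAR format: PA[31:12] from the PAR
         * low word (r0) went into bits [23:4] and PA[39:32] from the
         * PAR high word (r1) into bits [31:24], so r2[31:4] = IPA[39:12].
         * Worked example (hypothetical values): IPA 0x1_2345_6000 gives
         * PAR r1:r0 = 0x00000001:0x23456xxx, hence r2 = 0x01234560.
         */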

        /* Restore PAR */
        pop     {r0, r1}
        mcrr    p15, 0, r0, r1, c7              @ PAR

3:      load_vcpu                               @ Load VCPU pointer to r0
        str     r2, [r0, #VCPU_HPFAR]

1:      mov     r1, #ARM_EXCEPTION_HVC
        b       __kvm_vcpu_return

4:      pop     {r0, r1}                        @ Failed translation, return to guest
        mcrr    p15, 0, r0, r1, c7              @ PAR
        clrex
        pop     {r0, r1, r2}
        eret

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however, cp10 and cp11 accesses will still trap and fall back
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
        push    {r3-r7}

        @ NEON/VFP used.  Turn on VFP access.
        set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))

        @ Switch VFP/NEON hardware state to the guest's
        add     r7, r0, #VCPU_VFP_HOST
        ldr     r7, [r7]
        store_vfp_state r7
        add     r7, r0, #VCPU_VFP_GUEST
        restore_vfp_state r7

        pop     {r3-r7}
        pop     {r0-r2}
        clrex
        eret
#endif
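
/*
 * Design note on the lazy switch above: set_hcptr vmtrap clears the
 * cp10/cp11 trap bits in HCPTR, so after the first guest VFP access the
 * guest runs with direct VFP access until the next world switch, and
 * set_hcptr vmexit (see __kvm_vcpu_return) uses those same trap bits to
 * decide whether the host-side VFP restore can be skipped entirely.
 */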

        .align
hyp_irq:
        push    {r0, r1, r2}
        mov     r1, #ARM_EXCEPTION_IRQ
        load_vcpu                               @ Load VCPU pointer to r0
        b       __kvm_vcpu_return

        .align
hyp_fiq:
        b       hyp_fiq

        .ltorg

__kvm_hyp_code_end:
        .globl __kvm_hyp_code_end

        .section ".rodata"

und_die_str:
        .ascii  "unexpected undefined exception in Hyp mode at: %#08x\n"
pabt_die_str:
        .ascii  "unexpected prefetch abort in Hyp mode at: %#08x\n"
dabt_die_str:
        .ascii  "unexpected data abort in Hyp mode at: %#08x\n"
svc_die_str:
        .ascii  "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"