entry.S

/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
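/*
 * The CPU_GP_REGS and CPU_USER_PT_REGS constants are generated by
 * asm-offsets, so CPU_XREG_OFFSET(n) should work out to the byte offset of
 * general-purpose register n within struct kvm_cpu_context, roughly
 * (sketch, field names per the arm64 KVM headers of this era):
 *
 *	CPU_XREG_OFFSET(n) == offsetof(struct kvm_cpu_context, gp_regs)
 *			      + offsetof(struct kvm_regs, regs)
 *			      + n * sizeof(u64)
 *
 * i.e. the offset of gp_regs.regs.regs[n] from the start of the context.
 */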
	.text
	.pushsection	.hyp.text, "ax"
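/*
 * On non-VHE systems the .hyp.text section is mapped separately into the
 * EL2 (hyp) address space; the world-switch code below runs at EL2.
 */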
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
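/*
 * Both macros take a struct kvm_cpu_context pointer in \ctxt and cover the
 * AAPCS64 callee-saved registers (x19-x28) plus x29/fp and lr. The remaining
 * general-purpose registers are either caller-saved or handled explicitly by
 * __guest_enter and __guest_exit below.
 */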
/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x18: guest context

	// Store the host regs
	save_callee_saved_regs x1

	add	x18, x0, #VCPU_CONTEXT

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x19-x29, lr
	restore_callee_saved_regs x18

	// Restore guest reg x18
	ldr	x18,      [x18, #CPU_XREG_OFFSET(18)]

	// Do not touch any register after this!
	eret
ENDPROC(__guest_enter)
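/*
 * For context: in this kernel vintage the C side of the world switch
 * (arch/arm64/kvm/hyp/switch.c) drives __guest_enter roughly like the
 * sketch below; __guest_exit eventually returns there with the exit reason
 * in x0. Exact helper names vary between versions.
 *
 *	u64 exit_code;
 *
 *	do {
 *		exit_code = __guest_enter(vcpu, host_ctxt);
 *	} while (fixup_guest_exit(vcpu, &exit_code));
 */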
ENTRY(__guest_exit)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x18
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	str	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// Store the guest regs x19-x29, lr
	save_callee_saved_regs x1

	get_host_ctxt	x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb.
	esb
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	// If we have a pending asynchronous abort, now is the
	// time to find out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	dsb	sy		// Synchronize against in-flight ld/st
	nop
	msr	daifclr, #4	// Unmask aborts
alternative_endif

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
	.global	abort_guest_exit_start
abort_guest_exit_start:

	isb

	.global	abort_guest_exit_end
abort_guest_exit_end:

	// If the exception took place, restore the EL1 exception
	// context so that we can report some information.
	// Merge the exception code with the SError pending bit.
	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
ENDPROC(__guest_exit)
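/*
 * Note on the abort_guest_exit_start/end labels: in this kernel vintage the
 * EL2 SError vector (hyp-entry.S) compares ELR_EL2 against this window. If
 * the SError was taken from inside it, the abort is the one deliberately
 * unmasked above, so the handler folds it into the return code via
 * ARM_EXIT_WITH_SERROR_BIT instead of treating it as a hyp panic.
 */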