kvm_hyp.h

/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

#define __hyp_text __section(.hyp.text) notrace

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
					 "mrs_s %0, " __stringify(r##vh),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
					 "msr_s " __stringify(r##vh) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : : "rZ" (__val));				\
	} while (0)
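
/*
 * Rough sketch of what the accessors above turn into: ALTERNATIVE()
 * emits the non-VHE instruction inline and records the VHE encoding in
 * the alternatives section, and the kernel patches the VHE form in at
 * boot on CPUs that have ARM64_HAS_VIRT_HOST_EXTN.  For instance, a
 * hypothetical caller doing
 *
 *	u64 ctl = read_sysreg_elx(cntv_ctl, _EL0, _EL02);
 *
 * executes an "mrs" of CNTV_CTL_EL0 on a non-VHE host, and the mrs_s
 * form of cntv_ctl_EL02 (defined below) once the alternative has been
 * applied on a VHE host.
 */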

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding.
 */
#define read_sysreg_el2(r)						\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
					 "mrs %0, " __stringify(r##_EL1),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_el2(v,r)						\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
					 "msr " __stringify(r##_EL1) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : : "rZ" (__val));				\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
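
/*
 * Usage sketch (modelled on the hyp sysreg save path; the context field
 * names are assumptions here): guest EL1 state is accessed through the
 * _el1 wrappers so the same code works on both host types, e.g.
 *
 *	ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
 *	ctxt->sys_regs[ESR_EL1]   = read_sysreg_el1(esr);
 *
 * which reads SCTLR_EL1/ESR_EL1 on a non-VHE host and the
 * SCTLR_EL12/ESR_EL12 aliases on a VHE host running at EL2.
 */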

/* The VHE specific system registers and their encoding */
#define sctlr_EL12              sys_reg(3, 5, 1, 0, 0)
#define cpacr_EL12              sys_reg(3, 5, 1, 0, 2)
#define ttbr0_EL12              sys_reg(3, 5, 2, 0, 0)
#define ttbr1_EL12              sys_reg(3, 5, 2, 0, 1)
#define tcr_EL12                sys_reg(3, 5, 2, 0, 2)
#define afsr0_EL12              sys_reg(3, 5, 5, 1, 0)
#define afsr1_EL12              sys_reg(3, 5, 5, 1, 1)
#define esr_EL12                sys_reg(3, 5, 5, 2, 0)
#define far_EL12                sys_reg(3, 5, 6, 0, 0)
#define mair_EL12               sys_reg(3, 5, 10, 2, 0)
#define amair_EL12              sys_reg(3, 5, 10, 3, 0)
#define vbar_EL12               sys_reg(3, 5, 12, 0, 0)
#define contextidr_EL12         sys_reg(3, 5, 13, 0, 1)
#define cntkctl_EL12            sys_reg(3, 5, 14, 1, 0)
#define cntp_tval_EL02          sys_reg(3, 5, 14, 2, 0)
#define cntp_ctl_EL02           sys_reg(3, 5, 14, 2, 1)
#define cntp_cval_EL02          sys_reg(3, 5, 14, 2, 2)
#define cntv_tval_EL02          sys_reg(3, 5, 14, 3, 0)
#define cntv_ctl_EL02           sys_reg(3, 5, 14, 3, 1)
#define cntv_cval_EL02          sys_reg(3, 5, 14, 3, 2)
#define spsr_EL12               sys_reg(3, 5, 4, 0, 0)
#define elr_EL12                sys_reg(3, 5, 4, 0, 1)
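
/*
 * A sketch of how these encodings are consumed: sys_reg() packs the
 * op0/op1/CRn/CRm/op2 fields into the numeric value that the mrs_s and
 * msr_s assembler macros from <asm/sysreg.h> emit directly, since the
 * assembler may not know the *_EL12/*_EL02 register names.  So
 *
 *	write_sysreg_el1(val, sctlr);
 *
 * becomes a plain "msr" to SCTLR_EL1 on non-VHE, and an msr_s to
 * sys_reg(3, 5, 1, 0, 0), i.e. SCTLR_EL12, on VHE.
 */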

/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match @orig and @alt
 * @orig: A pointer to the default function, as returned by @fname when
 * @cond doesn't hold
 * @alt: A pointer to the alternate function, as returned by @fname
 * when @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}
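
/*
 * Usage sketch, in the style of the world-switch code (the symbol names
 * are illustrative): select the GICv2 or GICv3 save routine once, based
 * on a CPU feature, instead of testing the feature on every world
 * switch:
 *
 *	static hyp_alternate_select(__vgic_call_save_state,
 *				    __vgic_v2_save_state,
 *				    __vgic_v3_save_state,
 *				    ARM64_HAS_SYSREG_GIC_CPUIF);
 *
 *	static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 *	{
 *		__vgic_call_save_state()(vcpu);
 *	}
 */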

void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);

void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);

void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_save_state(struct kvm_vcpu *vcpu,
			struct kvm_guest_debug_arch *dbg,
			struct kvm_cpu_context *ctxt);
void __debug_restore_state(struct kvm_vcpu *vcpu,
			   struct kvm_guest_debug_arch *dbg,
			   struct kvm_cpu_context *ctxt);
void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
bool __fpsimd_enabled(void);

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);

void __noreturn __hyp_do_panic(unsigned long, ...);

#endif /* __ARM64_KVM_HYP_H__ */