/* arch/arm64/include/asm/kvm_emulate.h */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  21. #ifndef __ARM64_KVM_EMULATE_H__
  22. #define __ARM64_KVM_EMULATE_H__
  23. #include <linux/kvm_host.h>
  24. #include <asm/kvm_asm.h>
  25. #include <asm/kvm_arm.h>
  26. #include <asm/kvm_mmio.h>
  27. #include <asm/ptrace.h>
  28. unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
  29. unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
  30. bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
  31. void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
  32. void kvm_inject_undefined(struct kvm_vcpu *vcpu);
  33. void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
  34. void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
  35. static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
  36. {
  37. return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
  38. }
  39. static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
  40. {
  41. return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
  42. }
  43. static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
  44. {
  45. return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
  46. }
  47. static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
  48. {
  49. return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
  50. }
  51. static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
  52. {
  53. if (vcpu_mode_is_32bit(vcpu))
  54. return kvm_condition_valid32(vcpu);
  55. return true;
  56. }
  57. static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
  58. {
  59. if (vcpu_mode_is_32bit(vcpu))
  60. kvm_skip_instr32(vcpu, is_wide_instr);
  61. else
  62. *vcpu_pc(vcpu) += 4;
  63. }
  64. static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
  65. {
  66. *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
  67. }
  68. static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
  69. {
  70. if (vcpu_mode_is_32bit(vcpu))
  71. return vcpu_reg32(vcpu, reg_num);
  72. return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
  73. }
  74. /* Get vcpu SPSR for current mode */
  75. static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
  76. {
  77. if (vcpu_mode_is_32bit(vcpu))
  78. return vcpu_spsr32(vcpu);
  79. return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
  80. }
  81. static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
  82. {
  83. u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
  84. if (vcpu_mode_is_32bit(vcpu))
  85. return mode > COMPAT_PSR_MODE_USR;
  86. return mode != PSR_MODE_EL0t;
  87. }
  88. static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
  89. {
  90. return vcpu->arch.fault.esr_el2;
  91. }
  92. static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
  93. {
  94. return vcpu->arch.fault.far_el2;
  95. }
  96. static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
  97. {
  98. return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
  99. }
  100. static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
  101. {
  102. return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
  103. }
  104. static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
  105. {
  106. return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
  107. }
  108. static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
  109. {
  110. return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
  111. }
  112. static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
  113. {
  114. return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
  115. }
  116. static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
  117. {
  118. return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
  119. }
  120. static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
  121. {
  122. return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
  123. }
  124. static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
  125. {
  126. return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
  127. }
  128. /* This one is not specific to Data Abort */
  129. static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
  130. {
  131. return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
  132. }
  133. static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
  134. {
  135. return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
  136. }
  137. static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
  138. {
  139. return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
  140. }
  141. static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
  142. {
  143. return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
  144. }
  145. static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
  146. {
  147. return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
  148. }
  149. static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
  150. {
  151. return vcpu_sys_reg(vcpu, MPIDR_EL1);
  152. }
  153. static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
  154. {
  155. if (vcpu_mode_is_32bit(vcpu))
  156. *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
  157. else
  158. vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
  159. }
  160. static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
  161. {
  162. if (vcpu_mode_is_32bit(vcpu))
  163. return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
  164. return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
  165. }
  166. static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
  167. unsigned long data,
  168. unsigned int len)
  169. {
  170. if (kvm_vcpu_is_be(vcpu)) {
  171. switch (len) {
  172. case 1:
  173. return data & 0xff;
  174. case 2:
  175. return be16_to_cpu(data & 0xffff);
  176. case 4:
  177. return be32_to_cpu(data & 0xffffffff);
  178. default:
  179. return be64_to_cpu(data);
  180. }
  181. } else {
  182. switch (len) {
  183. case 1:
  184. return data & 0xff;
  185. case 2:
  186. return le16_to_cpu(data & 0xffff);
  187. case 4:
  188. return le32_to_cpu(data & 0xffffffff);
  189. default:
  190. return le64_to_cpu(data);
  191. }
  192. }
  193. return data; /* Leave LE untouched */
  194. }
  195. static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
  196. unsigned long data,
  197. unsigned int len)
  198. {
  199. if (kvm_vcpu_is_be(vcpu)) {
  200. switch (len) {
  201. case 1:
  202. return data & 0xff;
  203. case 2:
  204. return cpu_to_be16(data & 0xffff);
  205. case 4:
  206. return cpu_to_be32(data & 0xffffffff);
  207. default:
  208. return cpu_to_be64(data);
  209. }
  210. } else {
  211. switch (len) {
  212. case 1:
  213. return data & 0xff;
  214. case 2:
  215. return cpu_to_le16(data & 0xffff);
  216. case 4:
  217. return cpu_to_le32(data & 0xffffffff);
  218. default:
  219. return cpu_to_le64(data);
  220. }
  221. }
  222. return data; /* Leave LE untouched */
  223. }
  224. #endif /* __ARM64_KVM_EMULATE_H__ */