kvm_emulate.h

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
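
/*
 * Reset HCR_EL2 to the default trap configuration for this vCPU: start
 * from HCR_GUEST_FLAGS, keep the E2H bit set when the host kernel itself
 * runs in EL2 (VHE), and clear HCR_RW for guests that asked for a 32-bit
 * EL1.
 */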
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}
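
/*
 * Typical use after emulating a trapped instruction (illustrative sketch,
 * not part of this header): advance the guest PC past the instruction,
 * using the IL bit from ESR_EL2 to pick the AArch32 instruction width.
 *
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 */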

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
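
/*
 * Register 31 is special-cased above because, in the instruction syndromes
 * these helpers are fed from, it encodes XZR/WZR: reads return zero and
 * writes are discarded, matching the architectural zero register.
 */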

/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
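
/*
 * HPFAR_EL2 only records the faulting IPA at page granularity; the byte
 * offset within the page lives in FAR_EL2. An abort handler that needs the
 * exact address combines the two, e.g. (illustrative sketch, assuming 4K
 * pages):
 *
 *	phys_addr_t fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 *	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
 */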

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
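
/*
 * Together, the kvm_vcpu_dabt_* accessors decode the ISS field of a data
 * abort syndrome. A minimal MMIO decode might look like this (illustrative
 * sketch; a real decode also handles sign extension and other corner
 * cases):
 *
 *	if (!kvm_vcpu_dabt_isvalid(vcpu))
 *		return -EFAULT;			// no syndrome: cannot emulate
 *
 *	bool is_write = kvm_vcpu_dabt_iswrite(vcpu);
 *	int len = kvm_vcpu_dabt_get_as(vcpu);	// access size in bytes
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);	// register to read/write
 */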

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
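
/*
 * kvm_vcpu_trap_get_fault_type() masks off the level bits of the fault
 * status code, so a stage-2 abort can be classified without caring about
 * the translation level it occurred at. Illustrative sketch (the constant
 * names come from asm/esr.h):
 *
 *	switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
 *	case ESR_ELx_FSC_FAULT:		// translation fault
 *	case ESR_ELx_FSC_PERM:		// permission fault
 *	case ESR_ELx_FSC_ACCESS:	// access flag fault
 *		...
 *	}
 */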

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
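
/*
 * Bit 25 of SCTLR_EL1 is the EE bit, which controls the endianness of
 * data accesses at EL1; the CPSR E bit plays the same role for a 32-bit
 * guest.
 */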
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}
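
/*
 * The two conversion helpers above bracket every emulated MMIO access
 * (illustrative sketch, not part of this header): on a guest store the
 * register value is converted to the host's view before being handed to
 * the device model, and on a load completion the device data is converted
 * back before being written into the destination register.
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	int len = kvm_vcpu_dabt_get_as(vcpu);
 *
 *	// guest store: Rt -> device
 *	data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
 *
 *	// guest load: device -> Rt
 *	vcpu_set_reg(vcpu, rt, vcpu_data_host_to_guest(vcpu, data, len));
 */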

#endif /* __ARM64_KVM_EMULATE_H__ */