/* kvm_host.h */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
  9. #ifndef __MIPS_KVM_HOST_H__
  10. #define __MIPS_KVM_HOST_H__
  11. #include <linux/mutex.h>
  12. #include <linux/hrtimer.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/types.h>
  15. #include <linux/kvm.h>
  16. #include <linux/kvm_types.h>
  17. #include <linux/threads.h>
  18. #include <linux/spinlock.h>
  19. /* MIPS KVM register ids */
  20. #define MIPS_CP0_32(_R, _S) \
  21. (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
  22. #define MIPS_CP0_64(_R, _S) \
  23. (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
  24. #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
  25. #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
  26. #define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
  27. #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
  28. #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
  29. #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
  30. #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
  31. #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
  32. #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
  33. #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
  34. #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
  35. #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
  36. #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
  37. #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
  38. #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
  39. #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
  40. #define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
  41. #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
  42. #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
  43. #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
  44. #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
  45. #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
  46. #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
  47. #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
  48. #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
  49. #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
  50. #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
  51. #define KVM_MAX_VCPUS 1
  52. #define KVM_USER_MEM_SLOTS 8
  53. /* memory slots that does not exposed to userspace */
  54. #define KVM_PRIVATE_MEM_SLOTS 0
  55. #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
  56. #define KVM_HALT_POLL_NS_DEFAULT 500000
  57. /* Special address that contains the comm page, used for reducing # of traps */
  58. #define KVM_GUEST_COMMPAGE_ADDR 0x0
  59. #define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
  60. ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
  61. #define KVM_GUEST_KUSEG 0x00000000UL
  62. #define KVM_GUEST_KSEG0 0x40000000UL
  63. #define KVM_GUEST_KSEG23 0x60000000UL
  64. #define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
  65. #define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
  66. #define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
  67. #define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
  68. #define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
  69. /*
  70. * Map an address to a certain kernel segment
  71. */
  72. #define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
  73. #define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
  74. #define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
  75. #define KVM_INVALID_PAGE 0xdeadbeef
  76. #define KVM_INVALID_INST 0xdeadbeef
  77. #define KVM_INVALID_ADDR 0xdeadbeef
  78. extern atomic_t kvm_mips_instance;
  79. extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
  80. extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
  81. extern bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
  82. struct kvm_vm_stat {
  83. u32 remote_tlb_flush;
  84. };
  85. struct kvm_vcpu_stat {
  86. u32 wait_exits;
  87. u32 cache_exits;
  88. u32 signal_exits;
  89. u32 int_exits;
  90. u32 cop_unusable_exits;
  91. u32 tlbmod_exits;
  92. u32 tlbmiss_ld_exits;
  93. u32 tlbmiss_st_exits;
  94. u32 addrerr_st_exits;
  95. u32 addrerr_ld_exits;
  96. u32 syscall_exits;
  97. u32 resvd_inst_exits;
  98. u32 break_inst_exits;
  99. u32 trap_inst_exits;
  100. u32 msa_fpe_exits;
  101. u32 fpe_exits;
  102. u32 msa_disabled_exits;
  103. u32 flush_dcache_exits;
  104. u32 halt_successful_poll;
  105. u32 halt_attempted_poll;
  106. u32 halt_poll_invalid;
  107. u32 halt_wakeup;
  108. };
  109. enum kvm_mips_exit_types {
  110. WAIT_EXITS,
  111. CACHE_EXITS,
  112. SIGNAL_EXITS,
  113. INT_EXITS,
  114. COP_UNUSABLE_EXITS,
  115. TLBMOD_EXITS,
  116. TLBMISS_LD_EXITS,
  117. TLBMISS_ST_EXITS,
  118. ADDRERR_ST_EXITS,
  119. ADDRERR_LD_EXITS,
  120. SYSCALL_EXITS,
  121. RESVD_INST_EXITS,
  122. BREAK_INST_EXITS,
  123. TRAP_INST_EXITS,
  124. MSA_FPE_EXITS,
  125. FPE_EXITS,
  126. MSA_DISABLED_EXITS,
  127. FLUSH_DCACHE_EXITS,
  128. MAX_KVM_MIPS_EXIT_TYPES
  129. };
  130. struct kvm_arch_memory_slot {
  131. };
  132. struct kvm_arch {
  133. /* Guest GVA->HPA page table */
  134. unsigned long *guest_pmap;
  135. unsigned long guest_pmap_npages;
  136. /* Wired host TLB used for the commpage */
  137. int commpage_tlb;
  138. };
  139. #define N_MIPS_COPROC_REGS 32
  140. #define N_MIPS_COPROC_SEL 8
  141. struct mips_coproc {
  142. unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
  143. #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
  144. unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
  145. #endif
  146. };
  147. /*
  148. * Coprocessor 0 register names
  149. */
  150. #define MIPS_CP0_TLB_INDEX 0
  151. #define MIPS_CP0_TLB_RANDOM 1
  152. #define MIPS_CP0_TLB_LOW 2
  153. #define MIPS_CP0_TLB_LO0 2
  154. #define MIPS_CP0_TLB_LO1 3
  155. #define MIPS_CP0_TLB_CONTEXT 4
  156. #define MIPS_CP0_TLB_PG_MASK 5
  157. #define MIPS_CP0_TLB_WIRED 6
  158. #define MIPS_CP0_HWRENA 7
  159. #define MIPS_CP0_BAD_VADDR 8
  160. #define MIPS_CP0_COUNT 9
  161. #define MIPS_CP0_TLB_HI 10
  162. #define MIPS_CP0_COMPARE 11
  163. #define MIPS_CP0_STATUS 12
  164. #define MIPS_CP0_CAUSE 13
  165. #define MIPS_CP0_EXC_PC 14
  166. #define MIPS_CP0_PRID 15
  167. #define MIPS_CP0_CONFIG 16
  168. #define MIPS_CP0_LLADDR 17
  169. #define MIPS_CP0_WATCH_LO 18
  170. #define MIPS_CP0_WATCH_HI 19
  171. #define MIPS_CP0_TLB_XCONTEXT 20
  172. #define MIPS_CP0_ECC 26
  173. #define MIPS_CP0_CACHE_ERR 27
  174. #define MIPS_CP0_TAG_LO 28
  175. #define MIPS_CP0_TAG_HI 29
  176. #define MIPS_CP0_ERROR_PC 30
  177. #define MIPS_CP0_DEBUG 23
  178. #define MIPS_CP0_DEPC 24
  179. #define MIPS_CP0_PERFCNT 25
  180. #define MIPS_CP0_ERRCTL 26
  181. #define MIPS_CP0_DATA_LO 28
  182. #define MIPS_CP0_DATA_HI 29
  183. #define MIPS_CP0_DESAVE 31
  184. #define MIPS_CP0_CONFIG_SEL 0
  185. #define MIPS_CP0_CONFIG1_SEL 1
  186. #define MIPS_CP0_CONFIG2_SEL 2
  187. #define MIPS_CP0_CONFIG3_SEL 3
  188. #define MIPS_CP0_CONFIG4_SEL 4
  189. #define MIPS_CP0_CONFIG5_SEL 5
  190. /* Config0 register bits */
  191. #define CP0C0_M 31
  192. #define CP0C0_K23 28
  193. #define CP0C0_KU 25
  194. #define CP0C0_MDU 20
  195. #define CP0C0_MM 17
  196. #define CP0C0_BM 16
  197. #define CP0C0_BE 15
  198. #define CP0C0_AT 13
  199. #define CP0C0_AR 10
  200. #define CP0C0_MT 7
  201. #define CP0C0_VI 3
  202. #define CP0C0_K0 0
  203. /* Config1 register bits */
  204. #define CP0C1_M 31
  205. #define CP0C1_MMU 25
  206. #define CP0C1_IS 22
  207. #define CP0C1_IL 19
  208. #define CP0C1_IA 16
  209. #define CP0C1_DS 13
  210. #define CP0C1_DL 10
  211. #define CP0C1_DA 7
  212. #define CP0C1_C2 6
  213. #define CP0C1_MD 5
  214. #define CP0C1_PC 4
  215. #define CP0C1_WR 3
  216. #define CP0C1_CA 2
  217. #define CP0C1_EP 1
  218. #define CP0C1_FP 0
  219. /* Config2 Register bits */
  220. #define CP0C2_M 31
  221. #define CP0C2_TU 28
  222. #define CP0C2_TS 24
  223. #define CP0C2_TL 20
  224. #define CP0C2_TA 16
  225. #define CP0C2_SU 12
  226. #define CP0C2_SS 8
  227. #define CP0C2_SL 4
  228. #define CP0C2_SA 0
  229. /* Config3 Register bits */
  230. #define CP0C3_M 31
  231. #define CP0C3_ISA_ON_EXC 16
  232. #define CP0C3_ULRI 13
  233. #define CP0C3_DSPP 10
  234. #define CP0C3_LPA 7
  235. #define CP0C3_VEIC 6
  236. #define CP0C3_VInt 5
  237. #define CP0C3_SP 4
  238. #define CP0C3_MT 2
  239. #define CP0C3_SM 1
  240. #define CP0C3_TL 0
  241. /* MMU types, the first four entries have the same layout as the
  242. CP0C0_MT field. */
  243. enum mips_mmu_types {
  244. MMU_TYPE_NONE,
  245. MMU_TYPE_R4000,
  246. MMU_TYPE_RESERVED,
  247. MMU_TYPE_FMT,
  248. MMU_TYPE_R3000,
  249. MMU_TYPE_R6000,
  250. MMU_TYPE_R8000
  251. };
  252. /* Resume Flags */
  253. #define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
  254. #define RESUME_FLAG_HOST (1<<1) /* Resume host? */
  255. #define RESUME_GUEST 0
  256. #define RESUME_GUEST_DR RESUME_FLAG_DR
  257. #define RESUME_HOST RESUME_FLAG_HOST
  258. enum emulation_result {
  259. EMULATE_DONE, /* no further processing */
  260. EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
  261. EMULATE_FAIL, /* can't emulate this instruction */
  262. EMULATE_WAIT, /* WAIT instruction */
  263. EMULATE_PRIV_FAIL,
  264. };
  265. #define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
  266. #define MIPS3_PG_V 0x00000002 /* Valid */
  267. #define MIPS3_PG_NV 0x00000000
  268. #define MIPS3_PG_D 0x00000004 /* Dirty */
  269. #define mips3_paddr_to_tlbpfn(x) \
  270. (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
  271. #define mips3_tlbpfn_to_paddr(x) \
  272. ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
  273. #define MIPS3_PG_SHIFT 6
  274. #define MIPS3_PG_FRAME 0x3fffffc0
  275. #define VPN2_MASK 0xffffe000
  276. #define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
  277. #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
  278. ((x).tlb_lo1 & MIPS3_PG_G))
  279. #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
  280. #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
  281. #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
  282. ? ((x).tlb_lo1 & MIPS3_PG_V) \
  283. : ((x).tlb_lo0 & MIPS3_PG_V))
  284. #define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
  285. ((y) & VPN2_MASK & ~(x).tlb_mask))
  286. #define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
  287. TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
  288. struct kvm_mips_tlb {
  289. long tlb_mask;
  290. long tlb_hi;
  291. long tlb_lo0;
  292. long tlb_lo1;
  293. };
  294. #define KVM_MIPS_FPU_FPU 0x1
  295. #define KVM_MIPS_FPU_MSA 0x2
  296. #define KVM_MIPS_GUEST_TLB_SIZE 64
  297. struct kvm_vcpu_arch {
  298. void *host_ebase, *guest_ebase;
  299. int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
  300. unsigned long host_stack;
  301. unsigned long host_gp;
  302. /* Host CP0 registers used when handling exits from guest */
  303. unsigned long host_cp0_badvaddr;
  304. unsigned long host_cp0_cause;
  305. unsigned long host_cp0_epc;
  306. unsigned long host_cp0_entryhi;
  307. uint32_t guest_inst;
  308. /* GPRS */
  309. unsigned long gprs[32];
  310. unsigned long hi;
  311. unsigned long lo;
  312. unsigned long pc;
  313. /* FPU State */
  314. struct mips_fpu_struct fpu;
  315. /* Which FPU state is loaded (KVM_MIPS_FPU_*) */
  316. unsigned int fpu_inuse;
  317. /* COP0 State */
  318. struct mips_coproc *cop0;
  319. /* Host KSEG0 address of the EI/DI offset */
  320. void *kseg0_commpage;
  321. u32 io_gpr; /* GPR used as IO source/target */
  322. struct hrtimer comparecount_timer;
  323. /* Count timer control KVM register */
  324. uint32_t count_ctl;
  325. /* Count bias from the raw time */
  326. uint32_t count_bias;
  327. /* Frequency of timer in Hz */
  328. uint32_t count_hz;
  329. /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
  330. s64 count_dyn_bias;
  331. /* Resume time */
  332. ktime_t count_resume;
  333. /* Period of timer tick in ns */
  334. u64 count_period;
  335. /* Bitmask of exceptions that are pending */
  336. unsigned long pending_exceptions;
  337. /* Bitmask of pending exceptions to be cleared */
  338. unsigned long pending_exceptions_clr;
  339. unsigned long pending_load_cause;
  340. /* Save/Restore the entryhi register when are are preempted/scheduled back in */
  341. unsigned long preempt_entryhi;
  342. /* S/W Based TLB for guest */
  343. struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
  344. /* Cached guest kernel/user ASIDs */
  345. uint32_t guest_user_asid[NR_CPUS];
  346. uint32_t guest_kernel_asid[NR_CPUS];
  347. struct mm_struct guest_kernel_mm, guest_user_mm;
  348. int last_sched_cpu;
  349. /* WAIT executed */
  350. int wait;
  351. u8 fpu_enabled;
  352. u8 msa_enabled;
  353. };
  354. #define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
  355. #define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
  356. #define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
  357. #define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
  358. #define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
  359. #define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
  360. #define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
  361. #define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
  362. #define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
  363. #define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
  364. #define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
  365. #define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
  366. #define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
  367. #define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
  368. #define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
  369. #define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
  370. #define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
  371. #define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
  372. #define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
  373. #define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
  374. #define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
  375. #define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
  376. #define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
  377. #define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
  378. #define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
  379. #define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
  380. #define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
  381. #define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
  382. #define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
  383. #define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
  384. #define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
  385. #define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
  386. #define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
  387. #define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
  388. #define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
  389. #define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
  390. #define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
  391. #define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
  392. #define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
  393. #define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
  394. #define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
  395. #define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
  396. #define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
  397. #define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
  398. #define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
  399. #define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
  400. #define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
  401. #define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
  402. #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
  403. #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
  404. /*
  405. * Some of the guest registers may be modified asynchronously (e.g. from a
  406. * hrtimer callback in hard irq context) and therefore need stronger atomicity
  407. * guarantees than other registers.
  408. */
  409. static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
  410. unsigned long val)
  411. {
  412. unsigned long temp;
  413. do {
  414. __asm__ __volatile__(
  415. " .set mips3 \n"
  416. " " __LL "%0, %1 \n"
  417. " or %0, %2 \n"
  418. " " __SC "%0, %1 \n"
  419. " .set mips0 \n"
  420. : "=&r" (temp), "+m" (*reg)
  421. : "r" (val));
  422. } while (unlikely(!temp));
  423. }
  424. static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
  425. unsigned long val)
  426. {
  427. unsigned long temp;
  428. do {
  429. __asm__ __volatile__(
  430. " .set mips3 \n"
  431. " " __LL "%0, %1 \n"
  432. " and %0, %2 \n"
  433. " " __SC "%0, %1 \n"
  434. " .set mips0 \n"
  435. : "=&r" (temp), "+m" (*reg)
  436. : "r" (~val));
  437. } while (unlikely(!temp));
  438. }
  439. static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
  440. unsigned long change,
  441. unsigned long val)
  442. {
  443. unsigned long temp;
  444. do {
  445. __asm__ __volatile__(
  446. " .set mips3 \n"
  447. " " __LL "%0, %1 \n"
  448. " and %0, %2 \n"
  449. " or %0, %3 \n"
  450. " " __SC "%0, %1 \n"
  451. " .set mips0 \n"
  452. : "=&r" (temp), "+m" (*reg)
  453. : "r" (~change), "r" (val & change));
  454. } while (unlikely(!temp));
  455. }
  456. #define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
  457. #define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
  458. /* Cause can be modified asynchronously from hardirq hrtimer callback */
  459. #define kvm_set_c0_guest_cause(cop0, val) \
  460. _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
  461. #define kvm_clear_c0_guest_cause(cop0, val) \
  462. _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
  463. #define kvm_change_c0_guest_cause(cop0, change, val) \
  464. _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
  465. change, val)
  466. #define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
  467. #define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
  468. #define kvm_change_c0_guest_ebase(cop0, change, val) \
  469. { \
  470. kvm_clear_c0_guest_ebase(cop0, change); \
  471. kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
  472. }
  473. /* Helpers */
  474. static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
  475. {
  476. return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
  477. vcpu->fpu_enabled;
  478. }
  479. static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
  480. {
  481. return kvm_mips_guest_can_have_fpu(vcpu) &&
  482. kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
  483. }
  484. static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
  485. {
  486. return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
  487. vcpu->msa_enabled;
  488. }
  489. static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
  490. {
  491. return kvm_mips_guest_can_have_msa(vcpu) &&
  492. kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
  493. }
  494. struct kvm_mips_callbacks {
  495. int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
  496. int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
  497. int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
  498. int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
  499. int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
  500. int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
  501. int (*handle_syscall)(struct kvm_vcpu *vcpu);
  502. int (*handle_res_inst)(struct kvm_vcpu *vcpu);
  503. int (*handle_break)(struct kvm_vcpu *vcpu);
  504. int (*handle_trap)(struct kvm_vcpu *vcpu);
  505. int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
  506. int (*handle_fpe)(struct kvm_vcpu *vcpu);
  507. int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
  508. int (*vm_init)(struct kvm *kvm);
  509. int (*vcpu_init)(struct kvm_vcpu *vcpu);
  510. int (*vcpu_setup)(struct kvm_vcpu *vcpu);
  511. gpa_t (*gva_to_gpa)(gva_t gva);
  512. void (*queue_timer_int)(struct kvm_vcpu *vcpu);
  513. void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
  514. void (*queue_io_int)(struct kvm_vcpu *vcpu,
  515. struct kvm_mips_interrupt *irq);
  516. void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
  517. struct kvm_mips_interrupt *irq);
  518. int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
  519. uint32_t cause);
  520. int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
  521. uint32_t cause);
  522. int (*get_one_reg)(struct kvm_vcpu *vcpu,
  523. const struct kvm_one_reg *reg, s64 *v);
  524. int (*set_one_reg)(struct kvm_vcpu *vcpu,
  525. const struct kvm_one_reg *reg, s64 v);
  526. int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
  527. int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
  528. };
  529. extern struct kvm_mips_callbacks *kvm_mips_callbacks;
  530. int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
  531. /* Debug: dump vcpu state */
  532. int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
  533. /* Trampoline ASM routine to start running in "Guest" context */
  534. extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
  535. /* FPU/MSA context management */
  536. void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
  537. void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
  538. void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
  539. void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
  540. void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
  541. void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
  542. void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
  543. void kvm_own_fpu(struct kvm_vcpu *vcpu);
  544. void kvm_own_msa(struct kvm_vcpu *vcpu);
  545. void kvm_drop_fpu(struct kvm_vcpu *vcpu);
  546. void kvm_lose_fpu(struct kvm_vcpu *vcpu);
  547. /* TLB handling */
  548. uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
  549. uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
  550. uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
  551. extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
  552. struct kvm_vcpu *vcpu);
  553. extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
  554. struct kvm_vcpu *vcpu);
  555. extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
  556. struct kvm_mips_tlb *tlb,
  557. unsigned long *hpa0,
  558. unsigned long *hpa1);
  559. extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
  560. uint32_t *opc,
  561. struct kvm_run *run,
  562. struct kvm_vcpu *vcpu);
  563. extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
  564. uint32_t *opc,
  565. struct kvm_run *run,
  566. struct kvm_vcpu *vcpu);
  567. extern void kvm_mips_dump_host_tlbs(void);
  568. extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
  569. extern void kvm_mips_flush_host_tlb(int skip_kseg0);
  570. extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
  571. extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
  572. unsigned long entryhi);
  573. extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
  574. extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
  575. unsigned long gva);
  576. extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
  577. struct kvm_vcpu *vcpu);
  578. extern void kvm_local_flush_tlb_all(void);
  579. extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
  580. extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
  581. extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
  582. /* Emulation */
  583. uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
  584. enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
  585. extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
  586. uint32_t *opc,
  587. struct kvm_run *run,
  588. struct kvm_vcpu *vcpu);
  589. extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
  590. uint32_t *opc,
  591. struct kvm_run *run,
  592. struct kvm_vcpu *vcpu);
  593. extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
  594. uint32_t *opc,
  595. struct kvm_run *run,
  596. struct kvm_vcpu *vcpu);
  597. extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
  598. uint32_t *opc,
  599. struct kvm_run *run,
  600. struct kvm_vcpu *vcpu);
  601. extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
  602. uint32_t *opc,
  603. struct kvm_run *run,
  604. struct kvm_vcpu *vcpu);
  605. extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
  606. uint32_t *opc,
  607. struct kvm_run *run,
  608. struct kvm_vcpu *vcpu);
  609. extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
  610. uint32_t *opc,
  611. struct kvm_run *run,
  612. struct kvm_vcpu *vcpu);
  613. extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
  614. uint32_t *opc,
  615. struct kvm_run *run,
  616. struct kvm_vcpu *vcpu);
  617. extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
  618. uint32_t *opc,
  619. struct kvm_run *run,
  620. struct kvm_vcpu *vcpu);
  621. extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
  622. uint32_t *opc,
  623. struct kvm_run *run,
  624. struct kvm_vcpu *vcpu);
  625. extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
  626. uint32_t *opc,
  627. struct kvm_run *run,
  628. struct kvm_vcpu *vcpu);
  629. extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
  630. uint32_t *opc,
  631. struct kvm_run *run,
  632. struct kvm_vcpu *vcpu);
  633. extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
  634. uint32_t *opc,
  635. struct kvm_run *run,
  636. struct kvm_vcpu *vcpu);
  637. extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
  638. uint32_t *opc,
  639. struct kvm_run *run,
  640. struct kvm_vcpu *vcpu);
  641. extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
  642. uint32_t *opc,
  643. struct kvm_run *run,
  644. struct kvm_vcpu *vcpu);
  645. extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
  646. struct kvm_run *run);
  647. uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
  648. void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
  649. void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
  650. void kvm_mips_init_count(struct kvm_vcpu *vcpu);
  651. int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
  652. int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
  653. int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
  654. void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
  655. void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
  656. enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
  657. enum emulation_result kvm_mips_check_privilege(unsigned long cause,
  658. uint32_t *opc,
  659. struct kvm_run *run,
  660. struct kvm_vcpu *vcpu);
  661. enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
  662. uint32_t *opc,
  663. uint32_t cause,
  664. struct kvm_run *run,
  665. struct kvm_vcpu *vcpu);
  666. enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
  667. uint32_t *opc,
  668. uint32_t cause,
  669. struct kvm_run *run,
  670. struct kvm_vcpu *vcpu);
  671. enum emulation_result kvm_mips_emulate_store(uint32_t inst,
  672. uint32_t cause,
  673. struct kvm_run *run,
  674. struct kvm_vcpu *vcpu);
  675. enum emulation_result kvm_mips_emulate_load(uint32_t inst,
  676. uint32_t cause,
  677. struct kvm_run *run,
  678. struct kvm_vcpu *vcpu);
  679. unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
  680. unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
  681. unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
  682. unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
  683. /* Dynamic binary translation */
  684. extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
  685. struct kvm_vcpu *vcpu);
  686. extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
  687. struct kvm_vcpu *vcpu);
  688. extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
  689. struct kvm_vcpu *vcpu);
  690. extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
  691. struct kvm_vcpu *vcpu);
  692. /* Misc */
  693. extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
  694. extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
  695. static inline void kvm_arch_hardware_disable(void) {}
  696. static inline void kvm_arch_hardware_unsetup(void) {}
  697. static inline void kvm_arch_sync_events(struct kvm *kvm) {}
  698. static inline void kvm_arch_free_memslot(struct kvm *kvm,
  699. struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
  700. static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
  701. static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
  702. static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
  703. struct kvm_memory_slot *slot) {}
  704. static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
  705. static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
  706. static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
  707. static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
  708. static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
  709. #endif /* __MIPS_KVM_HOST_H__ */