/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/mipsregs.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)

#define KVM_MAX_VCPUS		1
#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000

/*
 * Special address that contains the comm page, used for reducing # of traps
 * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
 * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
 * caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ?	0 : \
					 (0x8000 - PAGE_SIZE))
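
/*
 * Worked example (illustrative, not from the original header): with the
 * common 4 KiB page size, PAGE_SIZE = 0x1000, so KVM_GUEST_COMMPAGE_ADDR =
 * 0x8000 - 0x1000 = 0x7000. That keeps the comm page reachable as a 16-bit
 * signed offset from the zero register while leaving page 0 unmapped, so
 * most NULL pointer dereferences still fault.
 */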
#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
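
/*
 * Worked example (illustrative): for the guest "physical" address
 * 0x01001000, KVM_GUEST_KSEG0ADDR() yields 0x01001000 | 0x40000000 =
 * 0x41001000, i.e. the same page seen through the guest's KSEG0 window,
 * which starts at KVM_GUEST_KSEG0 rather than the architectural 0x80000000.
 */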
#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}
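
/*
 * Note (illustrative): IS_ERR_VALUE() treats the top -MAX_ERRNO range of the
 * address space as errors, so both KVM_HVA_ERR_BAD (-1UL) and
 * KVM_HVA_ERR_RO_BAD (-2UL) are caught here without colliding with valid
 * EVA user addresses above PAGE_OFFSET.
 */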
struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
};

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0
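
/*
 * Worked example (illustrative): physical address 0x12345000 shifted right
 * by MIPS3_PG_SHIFT (6) gives 0x0048d140, the value that lands in the
 * EntryLo PFN field after masking with MIPS3_PG_FRAME;
 * mips3_tlbpfn_to_paddr() reverses the transformation.
 */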
#define VPN2_MASK		0xffffe000
#define KVM_ENTRYHI_ASID	MIPS_ENTRYHI_ASID
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
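
/*
 * Illustrative sketch (the name and body below are an assumption, not a
 * declaration from this file): how the TLB_HI_* macros combine to decide
 * whether a software TLB entry matches a guest EntryHi value, roughly the
 * per-entry comparison that kvm_mips_guest_tlb_lookup() performs.
 */
static inline bool kvm_mips_tlb_matches_example(const struct kvm_mips_tlb *tlb,
						unsigned long entryhi)
{
	/* The VPN2 must match under the entry's page mask... */
	if (!TLB_HI_VPN2_HIT(*tlb, entryhi))
		return false;
	/* ...and the ASID must match unless the entry is global. */
	return TLB_HI_ASID_HIT(*tlb, entryhi);
}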
#define KVM_NR_MEM_OBJS		4

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
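
/*
 * Illustrative sketch (assumed helper, not part of this header): because the
 * cache is topped up before entering the spinlock-protected fault path,
 * taking an object from it cannot fail there.
 */
static inline void *kvm_mmu_memory_cache_alloc_example(
					struct kvm_mmu_memory_cache *mc)
{
	/* Pop the most recently cached object; NULL only if never topped up. */
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}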
#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64
struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	unsigned long host_stack;
	unsigned long host_gp;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Guest kernel/user [partial] mm */
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Guest ASID of last user mode execution */
	unsigned int last_user_gasid;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	int last_sched_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
	u8 kscratch_enabled;
};
#define kvm_read_c0_guest_index(cop0)		(cop0->reg[MIPS_CP0_TLB_INDEX][0])
#define kvm_write_c0_guest_index(cop0, val)	(cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
#define kvm_read_c0_guest_entrylo0(cop0)	(cop0->reg[MIPS_CP0_TLB_LO0][0])
#define kvm_read_c0_guest_entrylo1(cop0)	(cop0->reg[MIPS_CP0_TLB_LO1][0])
#define kvm_read_c0_guest_context(cop0)		(cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
#define kvm_write_c0_guest_context(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
#define kvm_read_c0_guest_userlocal(cop0)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
#define kvm_write_c0_guest_userlocal(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
#define kvm_read_c0_guest_pagemask(cop0)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
#define kvm_write_c0_guest_pagemask(cop0, val)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
#define kvm_read_c0_guest_wired(cop0)		(cop0->reg[MIPS_CP0_TLB_WIRED][0])
#define kvm_write_c0_guest_wired(cop0, val)	(cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
#define kvm_read_c0_guest_hwrena(cop0)		(cop0->reg[MIPS_CP0_HWRENA][0])
#define kvm_write_c0_guest_hwrena(cop0, val)	(cop0->reg[MIPS_CP0_HWRENA][0] = (val))
#define kvm_read_c0_guest_badvaddr(cop0)	(cop0->reg[MIPS_CP0_BAD_VADDR][0])
#define kvm_write_c0_guest_badvaddr(cop0, val)	(cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
#define kvm_read_c0_guest_count(cop0)		(cop0->reg[MIPS_CP0_COUNT][0])
#define kvm_write_c0_guest_count(cop0, val)	(cop0->reg[MIPS_CP0_COUNT][0] = (val))
#define kvm_read_c0_guest_entryhi(cop0)		(cop0->reg[MIPS_CP0_TLB_HI][0])
#define kvm_write_c0_guest_entryhi(cop0, val)	(cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
#define kvm_read_c0_guest_compare(cop0)		(cop0->reg[MIPS_CP0_COMPARE][0])
#define kvm_write_c0_guest_compare(cop0, val)	(cop0->reg[MIPS_CP0_COMPARE][0] = (val))
#define kvm_read_c0_guest_status(cop0)		(cop0->reg[MIPS_CP0_STATUS][0])
#define kvm_write_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] = (val))
#define kvm_read_c0_guest_intctl(cop0)		(cop0->reg[MIPS_CP0_STATUS][1])
#define kvm_write_c0_guest_intctl(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][1] = (val))
#define kvm_read_c0_guest_cause(cop0)		(cop0->reg[MIPS_CP0_CAUSE][0])
#define kvm_write_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] = (val))
#define kvm_read_c0_guest_epc(cop0)		(cop0->reg[MIPS_CP0_EXC_PC][0])
#define kvm_write_c0_guest_epc(cop0, val)	(cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
#define kvm_read_c0_guest_prid(cop0)		(cop0->reg[MIPS_CP0_PRID][0])
#define kvm_write_c0_guest_prid(cop0, val)	(cop0->reg[MIPS_CP0_PRID][0] = (val))
#define kvm_read_c0_guest_ebase(cop0)		(cop0->reg[MIPS_CP0_PRID][1])
#define kvm_write_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] = (val))
#define kvm_read_c0_guest_config(cop0)		(cop0->reg[MIPS_CP0_CONFIG][0])
#define kvm_read_c0_guest_config1(cop0)		(cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0)		(cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0)		(cop0->reg[MIPS_CP0_CONFIG][3])
#define kvm_read_c0_guest_config4(cop0)		(cop0->reg[MIPS_CP0_CONFIG][4])
#define kvm_read_c0_guest_config5(cop0)		(cop0->reg[MIPS_CP0_CONFIG][5])
#define kvm_read_c0_guest_config7(cop0)		(cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][3] = (val))
#define kvm_write_c0_guest_config4(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][4] = (val))
#define kvm_write_c0_guest_config5(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][5] = (val))
#define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
#define kvm_read_c0_guest_kscratch1(cop0)	(cop0->reg[MIPS_CP0_DESAVE][2])
#define kvm_read_c0_guest_kscratch2(cop0)	(cop0->reg[MIPS_CP0_DESAVE][3])
#define kvm_read_c0_guest_kscratch3(cop0)	(cop0->reg[MIPS_CP0_DESAVE][4])
#define kvm_read_c0_guest_kscratch4(cop0)	(cop0->reg[MIPS_CP0_DESAVE][5])
#define kvm_read_c0_guest_kscratch5(cop0)	(cop0->reg[MIPS_CP0_DESAVE][6])
#define kvm_read_c0_guest_kscratch6(cop0)	(cop0->reg[MIPS_CP0_DESAVE][7])
#define kvm_write_c0_guest_kscratch1(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][2] = (val))
#define kvm_write_c0_guest_kscratch2(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][3] = (val))
#define kvm_write_c0_guest_kscratch3(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][4] = (val))
#define kvm_write_c0_guest_kscratch4(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][5] = (val))
#define kvm_write_c0_guest_kscratch5(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][6] = (val))
#define kvm_write_c0_guest_kscratch6(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][7] = (val))
/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers. Each helper below loops on an LL/SC
 * sequence, retrying until the store-conditional succeeds.
 */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}
#define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val))
#define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))

/* Cause can be modified asynchronously from hardirq hrtimer callback */
#define kvm_set_c0_guest_cause(cop0, val)				\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_clear_c0_guest_cause(cop0, val)				\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_change_c0_guest_cause(cop0, change, val)			\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],	\
					change, val)
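
/*
 * Illustrative use (assumed call site, not defined in this file): the
 * hrtimer expiry path can assert the guest timer interrupt bit in Cause
 * without racing the vcpu thread's own read-modify-write:
 *
 *	kvm_set_c0_guest_cause(vcpu->arch.cop0, CAUSEF_TI);
 */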
#define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val))
#define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
#define kvm_change_c0_guest_ebase(cop0, change, val)			\
{									\
	kvm_clear_c0_guest_ebase(cop0, change);				\
	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));		\
}

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);

void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long gva);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
						    u32 *opc,
						    struct kvm_run *run,
						    struct kvm_vcpu *vcpu);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
				 bool user, bool kernel);
extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);

enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,
	KVM_MIPS_GVA,
	KVM_MIPS_GPA,
	KVM_MIPS_TLB,
	KVM_MIPS_TLBINV,
	KVM_MIPS_TLBMOD,
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);

/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
	 * in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}
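
/*
 * Worked example (illustrative): if a 32-bit branch at EPC faults while
 * fetching its delay-slot instruction, badvaddr == epc + 4; a 16-bit branch
 * gives badvaddr == epc + 2. Both satisfy badvaddr - epc <= 4, which is why
 * the branch-delay check above covers either instruction width.
 */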
extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						       u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
							  struct kvm_run *run);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc,
					   u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __MIPS_KVM_HOST_H__ */