kvm_host.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/mipsregs.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S) \
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
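/*
 * For example, KVM_REG_MIPS_CP0_STATUS below is MIPS_CP0_32(12, 0), i.e.
 * KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | 96: eight ids are reserved per
 * register number, one for each possible select value.
 */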
#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
#define KVM_MAX_VCPUS		8
#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_COALESCED_MMIO_PAGE_OFFSET	1
#define KVM_HALT_POLL_NS_DEFAULT	500000
/*
 * Special address that contains the comm page, used for reducing # of traps.
 * This needs to be within 32 KiB of 0x0 (so the zero register can be used as
 * the base register), but preferably not at 0x0 so that most kernel NULL
 * pointer dereferences can still be caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ? 0 : \
					 (0x8000 - PAGE_SIZE))
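/*
 * With the usual 4 KiB pages this puts the comm page at 0x7000
 * (0x8000 - 0x1000), reachable with a 16-bit signed offset from $zero;
 * if PAGE_SIZE exceeds 32 KiB the address falls back to 0.
 */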
#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG1			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}
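/*
 * Both error values fall in the error range recognised by IS_ERR_VALUE(), so
 * kvm_is_error_hva() above catches KVM_HVA_ERR_BAD and KVM_HVA_ERR_RO_BAD
 * alike.
 */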
struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
#ifdef CONFIG_KVM_MIPS_VZ
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#endif
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0
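/*
 * e.g. the 4 KiB physical page at 0x00400000 has PFN 0x400, which these
 * helpers place at bit 6 of EntryLo: mips3_paddr_to_tlbpfn(0x00400000) ==
 * (0x00400000 >> 6) & 0x3fffffc0 == 0x10000.
 */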
#define VPN2_MASK		0xffffe000
#define KVM_ENTRYHI_ASID	MIPS_ENTRYHI_ASID
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) == \
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) || \
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
#define KVM_NR_MEM_OBJS 4

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64
struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Guest kernel/user [partial] mm */
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Guest ASID of last user mode execution */
	unsigned int last_user_gasid;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	int last_sched_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};
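/*
 * The helpers below perform atomic read-modify-write updates of a saved
 * guest CP0 register using an LL/SC retry loop: the store-conditional leaves
 * temp == 0 if the word was modified in between (e.g. from another CPU or an
 * interrupt handler), in which case the update is retried.
 */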
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}
/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl		unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{ \
	return cop0->reg[(_reg)][(sel)]; \
} \
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	cop0->reg[(_reg)][(sel)] = val; \
}
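/*
 * For instance, __BUILD_KVM_RW_SAVED(status, 32, MIPS_CP0_STATUS, 0) expands
 * to kvm_read_sw_gc0_status() and kvm_write_sw_gc0_status(), a u32 accessor
 * pair backed by cop0->reg[MIPS_CP0_STATUS][0].
 */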
/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
					 __KVMT##type val) \
{ \
	cop0->reg[(_reg)][(sel)] |= val; \
} \
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	cop0->reg[(_reg)][(sel)] &= ~val; \
} \
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
					    __KVMT##type mask, \
					    __KVMT##type val) \
{ \
	unsigned long _mask = mask; \
	cop0->reg[(_reg)][(sel)] &= ~_mask; \
	cop0->reg[(_reg)][(sel)] |= val & _mask; \
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
					 __KVMT##type val) \
{ \
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
} \
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
} \
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
					    __KVMT##type mask, \
					    __KVMT##type val) \
{ \
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val); \
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{ \
	return read_gc0_##name(); \
} \
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	write_gc0_##name(val); \
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
					 __KVMT##type val) \
{ \
	set_gc0_##name(val); \
} \
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	clear_gc0_##name(val); \
} \
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
					    __KVMT##type mask, \
					    __KVMT##type val) \
{ \
	change_gc0_##name(mask, val); \
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
{ \
	write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
} \
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
{ \
	cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
}

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
{ \
	return kvm_read_##name2(cop0); \
} \
static inline void kvm_write_##name1(struct mips_coproc *cop0, \
				     __KVMT##type val) \
{ \
	kvm_write_##name2(cop0, val); \
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
static inline void kvm_set_##name1(struct mips_coproc *cop0, \
				   __KVMT##type val) \
{ \
	kvm_set_##name2(cop0, val); \
} \
static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
				     __KVMT##type val) \
{ \
	kvm_clear_##name2(cop0, val); \
} \
static inline void kvm_change_##name1(struct mips_coproc *cop0, \
				      __KVMT##type mask, \
				      __KVMT##type val) \
{ \
	kvm_change_##name2(cop0, mask, val); \
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap
 * them with the common guest C0 accessors (for use by common emulation code).
 */
#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
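/*
 * e.g. __BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0) further below
 * generates kvm_read_sw_gc0_count()/kvm_write_sw_gc0_count() plus the common
 * kvm_read_c0_guest_count()/kvm_write_c0_guest_count() wrappers around them.
 */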
#ifndef CONFIG_KVM_MIPS_VZ

/*
 * T&E (trap & emulate software based virtualisation)
 * We generate the common accessors operating exclusively on the saved context
 * in RAM.
 */

#define __BUILD_KVM_RW_HW	__BUILD_KVM_RW_SW
#define __BUILD_KVM_SET_HW	__BUILD_KVM_SET_SW
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_ATOMIC_SW

#else
/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */
/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_RW_VZ(name, type, _reg, sel) \
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_SET_VZ(name, type, _reg, sel) \
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

#endif
/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw          name       type  reg num             select
 */
__BUILD_KVM_RW_HW(index,      32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,   l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,   l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,    l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(userlocal,  l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(pagemask,   l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,  32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(wired,      32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(hwrena,     32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,   l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_SW(count,      32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,    l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,    32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,     32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,     32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,        l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,       32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,      l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,    32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,    32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,    32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_HW(errorepc,   l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,  l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,  l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,  l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,  l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,  l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,  l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,    32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,  32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,     l,  MIPS_CP0_PRID,         1)
/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*flush_shadow_all)(struct kvm *kvm);
	/*
	 * Must take care of flushing any cached GPA PTEs (e.g. guest entries
	 * in VZ root TLB, or T&E GVA page tables and corresponding root TLB
	 * mappings).
	 */
	void (*flush_shadow_memslot)(struct kvm *kvm,
				     const struct kvm_memory_slot *slot);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);

void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu,
					   bool write_fault);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long gva,
						bool write_fault);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu,
						     bool write_fault);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
				 bool user, bool kernel);
extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);
/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);

enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,
	KVM_MIPS_GVA,
	KVM_MIPS_GPA,
	KVM_MIPS_TLB,
	KVM_MIPS_TLBINV,
	KVM_MIPS_TLBMOD,
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
							 unsigned long address)
{
}

/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS
	 * yet in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}
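/*
 * e.g. for a branch at 0x1000 whose delay slot at 0x1004 lands on an unmapped
 * page, badvaddr is 0x1004 while EPC points at the branch (0x1000), so the
 * CAUSEF_BD check above still classifies the fault as an instruction fetch.
 */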
extern enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause, u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, u32 *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
							  struct kvm_run *run);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_check_privilege(u32 cause, u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc, u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc, u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm,
					     struct kvm_memslots *slots) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __MIPS_KVM_HOST_H__ */