/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/mipsregs.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
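/*
 * Example (illustrative only): the id for Status (CP0 register 12, select 0)
 * is composed as
 *
 *	KVM_REG_MIPS_CP0_STATUS
 *		== KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0)
 *
 * i.e. the register number and select are packed into the low bits, with
 * eight select values per register number.
 */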
#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)

#define KVM_MAX_VCPUS		8
#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_HALT_POLL_NS_DEFAULT 500000

#ifdef CONFIG_KVM_MIPS_VZ
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;
#endif

/*
 * Special address that contains the comm page, used for reducing # of traps.
 * This needs to be within 32KB of 0x0 (so the zero register can be used), but
 * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
 * caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ? 0 : \
					 (0x8000 - PAGE_SIZE))
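/*
 * Illustrative sketch (not part of this header): with 4KB pages the comm
 * page lands at 0x7000, which a patched guest instruction can reach with a
 * 16-bit signed offset relative to the zero register, e.g.
 *
 *	lw	t0, 0x7000($zero)
 *
 * hence the requirement that the address stay within 32KB of 0x0.
 */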
#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG1			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
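/*
 * Example (illustrative only): KVM_GUEST_KSEG0ADDR() keeps the low 29 bits
 * of the address and ORs in the guest KSEG0 base, e.g.
 * 0x0000beef -> 0x4000beef; KVM_GUEST_KSEGX() can then be compared against
 * KVM_GUEST_KSEG0 etc. to classify a guest virtual address.
 */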
#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
#ifdef CONFIG_KVM_MIPS_VZ
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#endif
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0

#define VPN2_MASK		0xffffe000
#define KVM_ENTRYHI_ASID	MIPS_ENTRYHI_ASID
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
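/*
 * Minimal lookup sketch (an assumption about usage, mirroring how the hit
 * macros above are combined; see kvm_mips_guest_tlb_lookup() for the real
 * implementation):
 *
 *	int i;
 *	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; ++i) {
 *		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[i];
 *		if (TLB_HI_VPN2_HIT(*tlb, gva) && TLB_HI_ASID_HIT(*tlb, asid))
 *			return i;	// matching guest TLB entry
 *	}
 *	return -1;			// no match
 */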
#define KVM_NR_MEM_OBJS 4

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
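/*
 * Usage sketch (an assumption, following the common KVM memory-cache
 * pattern): the cache is topped up outside of spinlocks, then objects are
 * popped via cache->objects[--cache->nobjs] while page table locks are held,
 * so the fault path never calls the allocator.
 */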
#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64

struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Guest kernel/user [partial] mm */
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Guest ASID of last user mode execution */
	unsigned int last_user_gasid;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

#ifdef CONFIG_KVM_MIPS_VZ
	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];
#endif

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC "%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}
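/*
 * Illustrative use of the helpers above (not part of this header): setting a
 * Cause bit from a timer callback without losing a concurrent update:
 *
 *	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], CAUSEF_TI);
 *
 * The LL/SC pair retries until the store-conditional succeeds, making the
 * read-modify-write atomic against other CPUs and interrupt context.
 */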
/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl		unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}
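/*
 * Example expansion (illustrative): __BUILD_KVM_RW_SAVED(index, 32, 0, 0)
 * generates
 *
 *	static inline u32 kvm_read_sw_gc0_index(struct mips_coproc *cop0)
 *	{
 *		return cop0->reg[0][0];
 *	}
 *	static inline void kvm_write_sw_gc0_index(struct mips_coproc *cop0,
 *						  u32 val)
 *	{
 *		cop0->reg[0][0] = val;
 *	}
 */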
/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)				\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap
 * them with the common guest C0 accessors (for use by common emulation code).
 */
#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#ifndef CONFIG_KVM_MIPS_VZ

/*
 * T&E (trap & emulate software based virtualisation)
 * We generate the common accessors operating exclusively on the saved context
 * in RAM.
 */

#define __BUILD_KVM_RW_HW	__BUILD_KVM_RW_SW
#define __BUILD_KVM_SET_HW	__BUILD_KVM_SET_SW
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_ATOMIC_SW

#else

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */
#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

#endif

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw          name      type    reg num        select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)
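/*
 * Example (illustrative only): common emulation code can now use, e.g.,
 *
 *	u32 status = kvm_read_c0_guest_status(cop0);
 *	kvm_write_c0_guest_status(cop0, status | ST0_CU1);
 *
 * which resolves to the saved context in RAM under T&E, or to the live
 * guest CP0 registers under VZ.
 */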
/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}
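/*
 * Sketch of intended use (an assumption, based on the checks above): exit
 * handlers consult these before granting coprocessor access, e.g.
 *
 *	if (kvm_mips_guest_has_fpu(&vcpu->arch))
 *		kvm_own_fpu(vcpu);
 *	else
 *		... deliver a Coprocessor Unusable exception to the guest ...
 */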
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*flush_shadow_all)(struct kvm *kvm);
	/*
	 * Must take care of flushing any cached GPA PTEs (e.g. guest entries
	 * in VZ root TLB, or T&E GVA page tables and corresponding root TLB
	 * mappings).
	 */
	void (*flush_shadow_memslot)(struct kvm *kvm,
				     const struct kvm_memory_slot *slot);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);
#endif
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu,
					   bool write_fault);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long gva,
						bool write_fault);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu,
						     bool write_fault);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
				 bool user, bool kernel);
extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#endif

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
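/*
 * Example (illustrative only): per the flag documentation above, flushing
 * guest kernel mappings including KSeg0 combines the flags:
 *
 *	kvm_mips_flush_gva_pt(pgd, KMF_KERN | KMF_GPA);
 */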
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);

enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,
	KVM_MIPS_GVA,
	KVM_MIPS_GPA,
	KVM_MIPS_TLB,
	KVM_MIPS_TLBINV,
	KVM_MIPS_TLBMOD,
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS
	 * yet in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}
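/*
 * Worked example (illustrative): a TLBL with badvaddr == PC is clearly an
 * instruction fetch. In a branch delay slot (CAUSEF_BD set) EPC points at
 * the branch itself, so a fault at most 4 bytes past EPC (the delay slot)
 * is also treated as an ifetch fault.
 */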
extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							 u32 *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						       u32 *opc,
						       struct kvm_run *run,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						      u32 *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							  u32 *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
							 struct kvm_run *run);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);
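/*
 * Pairing sketch (an assumption, based on the declarations above): callers
 * freeze the hrtimer around a consistent Count/Compare update and then
 * restore it, e.g.
 *
 *	u32 count;
 *	ktime_t before = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	... update timer state consistently ...
 *	kvm_mips_restore_hrtimer(vcpu, before, count, 0);
 */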
#ifdef CONFIG_KVM_MIPS_VZ
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
#else
static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
#endif

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc,
					   u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */
enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __MIPS_KVM_HOST_H__ */