/* kvm_host.h (18 KB) */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#ifndef __POWERPC_KVM_HOST_H__
#define __POWERPC_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/kvm_para.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/hvcall.h>

/* Up to one vcpu and one vcore per possible host CPU. */
#define KVM_MAX_VCPUS		NR_CPUS
#define KVM_MAX_VCORES		NR_CPUS
#define KVM_USER_MEM_SLOTS	512

#include <asm/cputhreads.h>
#define KVM_MAX_VCPU_ID		(threads_per_subcore * KVM_MAX_VCORES)

#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET	1
#endif

/* Default halt-polling interval, in nanoseconds. */
#define KVM_HALT_POLL_NS_DEFAULT	500000

/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS		1
#define KVM_IRQCHIP_NUM_PINS	256

/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG	8
#define KVM_REQ_EPR_EXIT	9

#include <linux/mmu_notifier.h>

#define KVM_ARCH_WANT_MMU_NOTIFIER

/* MMU notifier callbacks, implemented elsewhere in the PPC KVM code. */
extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_unmap_hva_range(struct kvm *kvm,
			       unsigned long start, unsigned long end);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

/* Intentionally empty: single-page invalidation needs no extra work here. */
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
							 unsigned long address)
{
}
/*
 * Sizing of the hash tables used to cache guest HPTE groups (HPTEG);
 * the NUM values are derived from the BITS values below them.
 */
#define HPTEG_CACHE_NUM			(1 << 15)
#define HPTEG_HASH_BITS_PTE		13
#define HPTEG_HASH_BITS_PTE_LONG	12
#define HPTEG_HASH_BITS_VPTE		13
#define HPTEG_HASH_BITS_VPTE_LONG	5
#define HPTEG_HASH_BITS_VPTE_64K	11
#define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)

/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM			0x0fffffffffffffffULL

/* Opaque types defined elsewhere in the PPC KVM code. */
struct lppaca;
struct slb_shadow;
struct dtl_entry;
struct kvmppc_vcpu_book3s;
struct kvmppc_book3s_shadow_vcpu;
/* Per-VM statistics counters. */
struct kvm_vm_stat {
	u32 remote_tlb_flush;
};
/* Per-vcpu statistics counters, mostly counting guest exit causes. */
struct kvm_vcpu_stat {
	u32 sum_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 light_exits;
	/* Account for special types of light exits: */
	u32 itlb_real_miss_exits;
	u32 itlb_virt_miss_exits;
	u32 dtlb_real_miss_exits;
	u32 dtlb_virt_miss_exits;
	u32 syscall_exits;
	u32 isi_exits;
	u32 dsi_exits;
	u32 emulated_inst_exits;
	u32 dec_exits;
	u32 ext_intr_exits;
	u32 halt_successful_poll;
	u32 halt_attempted_poll;
	u32 halt_poll_invalid;
	u32 halt_wakeup;
	u32 dbell_exits;
	u32 gdbell_exits;
	u32 ld;
	u32 st;
#ifdef CONFIG_PPC_BOOK3S
	u32 pf_storage;
	u32 pf_instruc;
	u32 sp_storage;
	u32 sp_instruc;
	u32 queue_intr;
	u32 ld_slow;
	u32 st_slow;
#endif
};
/*
 * Exit classes used to index the timing_* arrays in struct kvm_vcpu_arch
 * (CONFIG_KVM_EXIT_TIMING).  Do not reorder: values are positional.
 */
enum kvm_exit_types {
	MMIO_EXITS,
	SIGNAL_EXITS,
	ITLB_REAL_MISS_EXITS,
	ITLB_VIRT_MISS_EXITS,
	DTLB_REAL_MISS_EXITS,
	DTLB_VIRT_MISS_EXITS,
	SYSCALL_EXITS,
	ISI_EXITS,
	DSI_EXITS,
	EMULATED_INST_EXITS,
	EMULATED_MTMSRWE_EXITS,
	EMULATED_WRTEE_EXITS,
	EMULATED_MTSPR_EXITS,
	EMULATED_MFSPR_EXITS,
	EMULATED_MTMSR_EXITS,
	EMULATED_MFMSR_EXITS,
	EMULATED_TLBSX_EXITS,
	EMULATED_TLBWE_EXITS,
	EMULATED_RFI_EXITS,
	EMULATED_RFCI_EXITS,
	EMULATED_RFDI_EXITS,
	DEC_EXITS,
	EXT_INTR_EXITS,
	HALT_WAKEUP,
	USR_PR_INST,
	FP_UNAVAIL,
	DEBUG_EXITS,
	TIMEINGUEST,
	DBELL_EXITS,
	GDBELL_EXITS,
	__NUMBER_OF_KVM_EXIT_TYPES	/* count of entries above */
};
/* allow access to big endian 32bit upper/lower parts and 64bit var */
struct kvmppc_exit_timing {
	union {
		u64 tv64;
		struct {
			u32 tbu, tbl;	/* upper/lower 32-bit halves of tv64 */
		} tv32;
	};
};
/* A page frame number together with its reference count. */
struct kvmppc_pginfo {
	unsigned long pfn;
	atomic_t refcnt;
};
  164. struct kvmppc_spapr_tce_table {
  165. struct list_head list;
  166. struct kvm *kvm;
  167. u64 liobn;
  168. struct rcu_head rcu;
  169. u32 page_shift;
  170. u64 offset; /* in pages */
  171. u64 size; /* window size in pages */
  172. struct page *pages[0];
  173. };
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;

/*
 * The reverse mapping array has one entry for each HPTE,
 * which stores the guest's view of the second word of the HPTE
 * (including the guest physical address of the mapping),
 * plus forward and backward pointers in a doubly-linked ring
 * of HPTEs that map the same host page. The pointers in this
 * ring are 32-bit HPTE indexes, to save space.
 */
struct revmap_entry {
	unsigned long guest_rpte;
	unsigned int forw, back;	/* 32-bit HPTE indexes in the ring */
};
/*
 * We use the top bit of each memslot->arch.rmap entry as a lock bit,
 * and bit 32 as a present flag. The bottom 32 bits are the
 * index in the guest HPT of a HPTE that points to the page.
 */
#define KVMPPC_RMAP_LOCK_BIT	63
#define KVMPPC_RMAP_RC_SHIFT	32
#define KVMPPC_RMAP_CHG_SHIFT	48
#define KVMPPC_RMAP_REFERENCED	(HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHANGED	(HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHG_ORDER	(0x3ful << KVMPPC_RMAP_CHG_SHIFT)
#define KVMPPC_RMAP_PRESENT	0x100000000ul
#define KVMPPC_RMAP_INDEX	0xfffffffful

/* Per-memslot arch data: the reverse-mapping array described above. */
struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	unsigned long *rmap;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};
/* Per-VM architecture-specific state. */
struct kvm_arch {
	unsigned int lpid;		/* logical partition ID of this guest */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	unsigned long hpt_virt;		/* guest hashed page table */
	struct revmap_entry *revmap;	/* reverse map, one entry per HPTE */
	unsigned int host_lpid;
	unsigned long host_lpcr;
	unsigned long sdr1;
	unsigned long host_sdr1;
	int tlbie_lock;
	unsigned long lpcr;
	unsigned long vrma_slb_v;
	int hpte_setup_done;
	u32 hpt_order;
	atomic_t vcpus_running;
	u32 online_vcores;
	unsigned long hpt_npte;
	unsigned long hpt_mask;
	atomic_t hpte_mod_interest;
	cpumask_t need_tlb_flush;
	int hpt_cma_alloc;
	struct dentry *debugfs_dir;
	struct dentry *htab_dentry;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	struct mutex hpt_mutex;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	struct list_head spapr_tce_tables;
	struct list_head rtas_tokens;
	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
	struct openpic *mpic;
#endif
#ifdef CONFIG_KVM_XICS
	struct kvmppc_xics *xics;
#endif
	struct kvmppc_ops *kvm_ops;	/* HV or PR backend for this VM */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* This array can grow quite large, keep it at the end */
	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
#endif
};
/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits. This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;	/* see VCORE_ENTRY_MAP/VCORE_EXIT_MAP below */
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;		/* one of the VCORE_* values below */
	u8 in_guest;
	struct kvmppc_vcore *master_vcore;
	struct list_head runnable_threads;
	struct list_head preempt_list;
	spinlock_t lock;
	struct swait_queue_head wq;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong conferring_threads;
};

/* Entry/exit thread bitmaps packed into entry_exit_map (see above). */
#define VCORE_ENTRY_MAP(vc)	((vc)->entry_exit_map & 0xff)
#define VCORE_EXIT_MAP(vc)	((vc)->entry_exit_map >> 8)
#define VCORE_IS_EXITING(vc)	(VCORE_EXIT_MAP(vc) != 0)

/* This bit is used when a vcore exit is triggered from outside the vcore */
#define VCORE_EXIT_REQ		0x10000

/*
 * Values for vcore_state.
 * Note that these are arranged such that lower values
 * (< VCORE_SLEEPING) don't require stolen time accounting
 * on load/unload, and higher values do.
 */
#define VCORE_INACTIVE	0
#define VCORE_PREEMPT	1
#define VCORE_PIGGYBACK	2
#define VCORE_SLEEPING	3
#define VCORE_RUNNING	4
#define VCORE_EXITING	5
/*
 * Struct used to manage memory for a virtual processor area
 * registered by a PAPR guest. There are three types of area
 * that a guest can register.
 */
struct kvmppc_vpa {
	unsigned long gpa;	/* Current guest phys addr */
	void *pinned_addr;	/* Address in kernel linear mapping */
	void *pinned_end;	/* End of region */
	unsigned long next_gpa;	/* Guest phys addr for update */
	unsigned long len;	/* Number of bytes required */
	u8 update_pending;	/* 1 => update pinned_addr from next_gpa */
	bool dirty;		/* true => area has been modified by kernel */
};
/* Result of a guest address translation (filled in by mmu->xlate()). */
struct kvmppc_pte {
	ulong eaddr;		/* effective address */
	u64 vpage;
	ulong raddr;		/* real (translated) address */
	bool may_read : 1;
	bool may_write : 1;
	bool may_execute : 1;
	u8 page_size;		/* MMU_PAGE_xxx */
};
/* Callback table emulating the guest's MMU model. */
struct kvmppc_mmu {
	/* book3s_64 only */
	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
	u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
	u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
	void (*slbia)(struct kvm_vcpu *vcpu);
	/* book3s */
	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
	u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
	int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
		     struct kvmppc_pte *pte, bool data, bool iswrite);
	void (*reset_msr)(struct kvm_vcpu *vcpu);
	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
	int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
	u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
	bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
/* One guest SLB entry, plus the original values written by the guest. */
struct kvmppc_slb {
	u64 esid;
	u64 vsid;
	u64 orige;
	u64 origv;
	bool valid : 1;
	bool Ks : 1;
	bool Kp : 1;
	bool nx : 1;
	bool large : 1;	/* PTEs are 16MB */
	bool tb : 1;	/* 1TB segment */
	bool class : 1;
	u8 base_page_size;	/* MMU_PAGE_xxx */
};
/* Struct used to accumulate timing information in HV real mode code */
struct kvmhv_tb_accumulator {
	u64 seqcount;	/* used to synchronize access, also count * 2 */
	u64 tb_total;	/* total time in timebase ticks */
	u64 tb_min;	/* min time */
	u64 tb_max;	/* max time */
};

/* Number of IAC/DAC debug registers implemented on this core family. */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM	2
#define KVMPPC_BOOKE_DAC_NUM	2
#else
#define KVMPPC_BOOKE_IAC_NUM	4
#define KVMPPC_BOOKE_DAC_NUM	2
#endif
#define KVMPPC_BOOKE_MAX_IAC	4
#define KVMPPC_BOOKE_MAX_DAC	2
/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
#define KVMPPC_EPR_NONE		0	/* EPR not supported */
#define KVMPPC_EPR_USER		1	/* exit to userspace to fill EPR */
#define KVMPPC_EPR_KERNEL	2	/* in-kernel irqchip */

/* Interrupt controller type, stored in vcpu->arch.irq_type. */
#define KVMPPC_IRQ_DEFAULT	0
#define KVMPPC_IRQ_MPIC		1
#define KVMPPC_IRQ_XICS		2

struct openpic;
/*
 * Per-vcpu architecture-specific state.  Most fields shadow the guest's
 * view of the corresponding register; field order is part of the layout
 * contract with the asm-offsets/low-level entry code, so do not reorder.
 */
struct kvm_vcpu_arch {
	ulong host_stack;
	u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
	struct kvmppc_slb slb[64];
	int slb_max;		/* 1 + index of last valid entry in slb[] */
	int slb_nr;		/* total number of entries in SLB */
	struct kvmppc_mmu mmu;
	struct kvmppc_vcpu_book3s *book3s;
#endif
#ifdef CONFIG_PPC_BOOK3S_32
	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif

	ulong gpr[32];		/* guest general-purpose registers */

	struct thread_fp_state fp;

#ifdef CONFIG_SPE
	ulong evr[32];
	ulong spefscr;
	ulong host_spefscr;
	u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
	struct thread_vr_state vr;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	u32 host_mas4;
	u32 host_mas6;
	u32 shadow_epcr;
	u32 shadow_msrp;
	u32 eplc;
	u32 epsc;
	u32 oldpir;
#endif

#if defined(CONFIG_BOOKE)
#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
	u32 epcr;
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S
	/* For Gekko paired singles */
	u32 qpr[32];
#endif

	ulong pc;
	ulong ctr;
	ulong lr;
#ifdef CONFIG_PPC_BOOK3S
	ulong tar;
#endif

	ulong xer;
	u32 cr;

#ifdef CONFIG_PPC_BOOK3S
	ulong hflags;
	ulong guest_owned_ext;
	ulong purr;
	ulong spurr;
	ulong ic;
	ulong vtb;
	ulong dscr;
	ulong amr;
	ulong uamor;
	ulong iamr;
	u32 ctrl;
	u32 dabrx;
	ulong dabr;
	ulong dawr;
	ulong dawrx;
	ulong ciabr;
	ulong cfar;
	ulong ppr;
	u32 pspb;
	ulong fscr;
	ulong shadow_fscr;
	ulong ebbhr;
	ulong ebbrr;
	ulong bescr;
	ulong csigr;
	ulong tacr;
	ulong tcscr;
	ulong acop;
	ulong wort;
	ulong shadow_srr1;
#endif
	u32 vrsave;	/* also USPRG0 */
	u32 mmucr;
	/* shadow_msr is unused for BookE HV */
	ulong shadow_msr;
	ulong csrr0;
	ulong csrr1;
	ulong dsrr0;
	ulong dsrr1;
	ulong mcsrr0;
	ulong mcsrr1;
	ulong mcsr;
	u32 dec;
#ifdef CONFIG_BOOKE
	u32 decar;
#endif
	/* Time base value when we entered the guest */
	u64 entry_tb;
	u64 entry_vtb;
	u64 entry_ic;
	u32 tcr;
	ulong tsr;	/* we need to perform set/clr_bits() which requires ulong */
	u32 ivor[64];
	ulong ivpr;
	u32 pvr;

	u32 shadow_pid;
	u32 shadow_pid1;
	u32 pid;
	u32 swap_pid;

	u32 ccr0;
	u32 ccr1;
	u32 dbsr;

	u64 mmcr[5];	/* performance-monitor registers */
	u32 pmc[8];
	u32 spmc[2];
	u64 siar;
	u64 sdar;
	u64 sier;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Transactional-memory checkpointed state */
	u64 tfhar;
	u64 texasr;
	u64 tfiar;

	u32 cr_tm;
	u64 lr_tm;
	u64 ctr_tm;
	u64 amr_tm;
	u64 ppr_tm;
	u64 dscr_tm;
	u64 tar_tm;

	ulong gpr_tm[32];

	struct thread_fp_state fp_tm;

	struct thread_vr_state vr_tm;
	u32 vrsave_tm;	/* also USPRG0 */
#endif

#ifdef CONFIG_KVM_EXIT_TIMING
	struct mutex exit_timing_lock;
	struct kvmppc_exit_timing timing_exit;
	struct kvmppc_exit_timing timing_last_enter;
	u32 last_exit_type;
	u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_last_exit;
	struct dentry *debugfs_exit_timing;
#endif

#ifdef CONFIG_PPC_BOOK3S
	ulong fault_dar;
	u32 fault_dsisr;
	unsigned long intr_msr;
#endif

#ifdef CONFIG_BOOKE
	ulong fault_dear;
	ulong fault_esr;
	ulong queued_dear;
	ulong queued_esr;
	spinlock_t wdt_lock;
	struct timer_list wdt_timer;
	u32 tlbcfg[4];
	u32 tlbps[4];
	u32 mmucfg;
	u32 eptcfg;
	u32 epr;
	u64 sprg9;
	u32 pwrmgtcr0;
	u32 crit_save;
	/* guest debug registers*/
	struct debug_reg dbg_reg;
#endif
	gpa_t paddr_accessed;
	gva_t vaddr_accessed;
	pgd_t *pgdir;

	u8 io_gpr;	/* GPR used as IO source/target */
	u8 mmio_host_swabbed;
	u8 mmio_sign_extend;
	u8 osi_needed;
	u8 osi_enabled;
	u8 papr_enabled;
	u8 watchdog_enabled;
	u8 sane;
	u8 cpu_type;
	u8 hcall_needed;
	u8 epr_flags;	/* KVMPPC_EPR_xxx */
	u8 epr_needed;

	u32 cpr0_cfgaddr;	/* holds the last set cpr0_cfgaddr */

	struct hrtimer dec_timer;
	u64 dec_jiffies;
	u64 dec_expires;
	unsigned long pending_exceptions;
	u8 ceded;
	u8 prodded;
	u32 last_inst;

	struct swait_queue_head *wqp;
	struct kvmppc_vcore *vcore;
	int ret;
	int trap;
	int state;	/* KVMPPC_VCPU_* */
	int ptid;
	int thread_cpu;
	bool timer_running;
	wait_queue_head_t cpu_run;

	struct kvm_vcpu_arch_shared *shared;
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	bool shared_big_endian;
#endif
	unsigned long magic_page_pa;	/* phys addr to map the magic page to */
	unsigned long magic_page_ea;	/* effect. addr to map the magic page to */
	bool disable_kernel_nx;

	int irq_type;		/* one of KVM_IRQ_* */
	int irq_cpu_id;
	struct openpic *mpic;	/* KVM_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
	struct kvmppc_icp *icp;	/* XICS presentation controller */
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	struct kvm_vcpu_arch_shared shregs;

	unsigned long pgfault_addr;
	long pgfault_index;
	unsigned long pgfault_hpte[2];

	struct list_head run_list;
	struct task_struct *run_task;
	struct kvm_run *kvm_run;

	spinlock_t vpa_update_lock;
	struct kvmppc_vpa vpa;
	struct kvmppc_vpa dtl;
	struct dtl_entry *dtl_ptr;
	unsigned long dtl_index;
	u64 stolen_logged;
	struct kvmppc_vpa slb_shadow;

	spinlock_t tbacct_lock;
	u64 busy_stolen;
	u64 busy_preempt;

	u32 emul_inst;
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	struct kvmhv_tb_accumulator *cur_activity;	/* What we're timing */
	u64 cur_tb_start;			/* when it started */
	struct kvmhv_tb_accumulator rm_entry;	/* real-mode entry code */
	struct kvmhv_tb_accumulator rm_intr;	/* real-mode intr handling */
	struct kvmhv_tb_accumulator rm_exit;	/* real-mode exit code */
	struct kvmhv_tb_accumulator guest_time;	/* guest execution */
	struct kvmhv_tb_accumulator cede_time;	/* time napping inside guest */
	struct dentry *debugfs_dir;
	struct dentry *debugfs_timings;
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
};
/* Access one guest FP register inside vcpu->arch.fp. */
#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY		0
#define KVMPPC_VCPU_RUNNABLE		1
#define KVMPPC_VCPU_BUSY_IN_HOST	2

/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK	0x001f
#define KVM_MMIO_REG_EXT_MASK	0xffe0
#define KVM_MMIO_REG_GPR	0x0000
#define KVM_MMIO_REG_FPR	0x0020
#define KVM_MMIO_REG_QPR	0x0040
#define KVM_MMIO_REG_FQPR	0x0060

#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
/* Generic KVM arch hooks that need no work on powerpc: empty stubs. */
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __POWERPC_KVM_HOST_H__ */