/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
  19. #ifndef __POWERPC_KVM_PPC_H__
  20. #define __POWERPC_KVM_PPC_H__
  21. /* This file exists just so we can dereference kvm_vcpu, avoiding nested header
  22. * dependencies. */
  23. #include <linux/mutex.h>
  24. #include <linux/timer.h>
  25. #include <linux/types.h>
  26. #include <linux/kvm_types.h>
  27. #include <linux/kvm_host.h>
  28. #include <linux/bug.h>
  29. #ifdef CONFIG_PPC_BOOK3S
  30. #include <asm/kvm_book3s.h>
  31. #else
  32. #include <asm/kvm_booke.h>
  33. #endif
  34. #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
  35. #include <asm/paca.h>
  36. #endif
/*
 * Outcome of in-kernel instruction emulation; tells the exit path
 * whether the instruction is done or needs help from elsewhere.
 */
enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_DO_DCR,       /* kvm_run filled with DCR request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
  45. extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
  46. extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
  47. extern void kvmppc_handler_highmem(void);
  48. extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
  49. extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
  50. unsigned int rt, unsigned int bytes,
  51. int is_default_endian);
  52. extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
  53. unsigned int rt, unsigned int bytes,
  54. int is_default_endian);
  55. extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
  56. u64 val, unsigned int bytes,
  57. int is_default_endian);
  58. extern int kvmppc_emulate_instruction(struct kvm_run *run,
  59. struct kvm_vcpu *vcpu);
  60. extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
  61. extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
  62. extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
  63. extern void kvmppc_decrementer_func(unsigned long data);
  64. extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
  65. extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
  66. extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
  67. /* Core-specific hooks */
  68. extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
  69. unsigned int gtlb_idx);
  70. extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
  71. extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
  72. extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
  73. extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
  74. extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
  75. extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
  76. extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
  77. gva_t eaddr);
  78. extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
  79. extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
  80. extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
  81. unsigned int id);
  82. extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
  83. extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
  84. extern int kvmppc_core_check_processor_compat(void);
  85. extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
  86. struct kvm_translation *tr);
  87. extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
  88. extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
  89. extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
  90. extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
  91. extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
  92. extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
  93. extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
  94. extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
  95. struct kvm_interrupt *irq);
  96. extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
  97. extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
  98. extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
  99. extern int kvmppc_booke_init(void);
  100. extern void kvmppc_booke_exit(void);
  101. extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
  102. extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
  103. extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
  104. extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
  105. extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
  106. extern void kvmppc_free_hpt(struct kvm *kvm);
  107. extern long kvmppc_prepare_vrma(struct kvm *kvm,
  108. struct kvm_userspace_memory_region *mem);
  109. extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
  110. struct kvm_memory_slot *memslot, unsigned long porder);
  111. extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
  112. extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
  113. struct kvm_create_spapr_tce *args);
  114. extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
  115. unsigned long ioba, unsigned long tce);
  116. extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
  117. unsigned long ioba);
  118. extern struct kvm_rma_info *kvm_alloc_rma(void);
  119. extern void kvm_release_rma(struct kvm_rma_info *ri);
  120. extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
  121. extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
  122. extern int kvmppc_core_init_vm(struct kvm *kvm);
  123. extern void kvmppc_core_destroy_vm(struct kvm *kvm);
  124. extern void kvmppc_core_free_memslot(struct kvm *kvm,
  125. struct kvm_memory_slot *free,
  126. struct kvm_memory_slot *dont);
  127. extern int kvmppc_core_create_memslot(struct kvm *kvm,
  128. struct kvm_memory_slot *slot,
  129. unsigned long npages);
  130. extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
  131. struct kvm_memory_slot *memslot,
  132. struct kvm_userspace_memory_region *mem);
  133. extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
  134. struct kvm_userspace_memory_region *mem,
  135. const struct kvm_memory_slot *old);
  136. extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
  137. struct kvm_ppc_smmu_info *info);
  138. extern void kvmppc_core_flush_memslot(struct kvm *kvm,
  139. struct kvm_memory_slot *memslot);
  140. extern int kvmppc_bookehv_init(void);
  141. extern void kvmppc_bookehv_exit(void);
  142. extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
  143. extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
  144. int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
  145. extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
  146. extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
  147. extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
  148. extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
  149. u32 priority);
  150. extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
  151. u32 *priority);
  152. extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
  153. extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
/*
 * Value container for the ONE_REG get/set interface.  Which member is
 * valid is determined by the size field encoded in the register id
 * (see one_reg_size() / get_reg_val() / set_reg_val() below).
 */
union kvmppc_one_reg {
	u32	wval;		/* 4-byte register */
	u64	dval;		/* 8-byte register */
	vector128 vval;		/* Altivec vector register */
	u64	vsxval[2];	/* VSX register (two doublewords) */
	struct {
		u64	addr;
		u64	length;
	} vpaval;		/* VPA registration: guest address + length */
};
/*
 * Backend vtable for a VM.  There is one implementation for Book3S HV
 * and one for PR/booke (see kvmppc_hv_ops/kvmppc_pr_ops below); the
 * active one is reachable through kvm->arch.kvm_ops and dispatched by
 * the generic powerpc KVM code.
 */
struct kvmppc_ops {
	struct module *owner;	/* module to pin while the VM exists */
	/* vcpu register state accessors */
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	/* vcpu lifecycle and execution */
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	/* memory-slot / dirty-logging management */
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old);
	/* MMU-notifier callbacks (host page-table events) */
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long hva);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	/* VM lifecycle */
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	/* instruction / SPR emulation */
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
};
  210. extern struct kvmppc_ops *kvmppc_hv_ops;
  211. extern struct kvmppc_ops *kvmppc_pr_ops;
  212. static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
  213. {
  214. return kvm->arch.kvm_ops == kvmppc_hv_ops;
  215. }
  216. /*
  217. * Cuts out inst bits with ordering according to spec.
  218. * That means the leftmost bit is zero. All given bits are included.
  219. */
  220. static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
  221. {
  222. u32 r;
  223. u32 mask;
  224. BUG_ON(msb > lsb);
  225. mask = (1 << (lsb - msb + 1)) - 1;
  226. r = (inst >> (63 - lsb)) & mask;
  227. return r;
  228. }
  229. /*
  230. * Replaces inst bits with ordering according to spec.
  231. */
  232. static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
  233. {
  234. u32 r;
  235. u32 mask;
  236. BUG_ON(msb > lsb);
  237. mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
  238. r = (inst & ~mask) | ((value << (63 - lsb)) & mask);
  239. return r;
  240. }
/* Payload size in bytes encoded in a ONE_REG register id. */
#define one_reg_size(id) \
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

/*
 * Pack a scalar into a union kvmppc_one_reg sized according to @id.
 * Statement expression yielding the union; only 4- and 8-byte
 * registers are handled, anything else is a bug.
 */
#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

/*
 * Unpack a union kvmppc_one_reg into a u64 according to the size
 * encoded in @id; counterpart of get_reg_val().
 */
#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
  261. int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
  262. int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
  263. int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
  264. int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
  265. int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
  266. int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
  267. int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
  268. int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
  269. void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
  270. struct openpic;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;

/* Record this cpu's XICS register address in its paca for real-mode use. */
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

/*
 * Fetch the XIRR value latched by the real-mode interrupt handler.
 * Reading consumes it: saved_xirr is cleared afterwards.
 */
static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;

	return xirr;
}

/* Flag (or clear) a pending host IPI for @cpu in its paca. */
static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

/* Kick through the backend hook (HV can reach a vcpu running in real mode). */
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
/* !CONFIG_KVM_BOOK3S_HV_POSSIBLE: no-op stubs for the helpers above. */
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

/* Without HV there is no real-mode path; use the generic kick. */
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif
#ifdef CONFIG_KVM_XICS
/* Is this vcpu connected to an in-kernel XICS presentation controller? */
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
#else
/* No in-kernel XICS: report "not enabled" and reject the control calls. */
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif
/*
 * Set the guest's EPR (external proxy) value: written straight to the
 * guest SPR on booke HV, tracked in software on other booke, and a
 * no-op everywhere else.
 */
static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}
#ifdef CONFIG_KVM_MPIC
/* In-kernel MPIC glue: EPR delivery and vcpu (dis)connection. */
void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);
#else
/* No in-kernel MPIC: EPR update is a no-op and connecting always fails. */
static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}
#endif /* CONFIG_KVM_MPIC */
  365. int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
  366. struct kvm_config_tlb *cfg);
  367. int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
  368. struct kvm_dirty_tlb *cfg);
  369. long kvmppc_alloc_lpid(void);
  370. void kvmppc_claim_lpid(long lpid);
  371. void kvmppc_free_lpid(long lpid);
  372. void kvmppc_init_lpid(unsigned long nr_lpids);
  373. static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
  374. {
  375. struct page *page;
  376. /*
  377. * We can only access pages that the kernel maps
  378. * as memory. Bail out for unmapped ones.
  379. */
  380. if (!pfn_valid(pfn))
  381. return;
  382. /* Clear i-cache for new pages */
  383. page = pfn_to_page(pfn);
  384. if (!test_bit(PG_arch_1, &page->flags)) {
  385. flush_dcache_icache_page(page);
  386. set_bit(PG_arch_1, &page->flags);
  387. }
  388. }
/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	/* All remaining configurations keep the shared page big endian. */
	return true;
#endif
}
  405. #define SHARED_WRAPPER_GET(reg, size) \
  406. static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
  407. { \
  408. if (kvmppc_shared_big_endian(vcpu)) \
  409. return be##size##_to_cpu(vcpu->arch.shared->reg); \
  410. else \
  411. return le##size##_to_cpu(vcpu->arch.shared->reg); \
  412. } \
  413. #define SHARED_WRAPPER_SET(reg, size) \
  414. static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
  415. { \
  416. if (kvmppc_shared_big_endian(vcpu)) \
  417. vcpu->arch.shared->reg = cpu_to_be##size(val); \
  418. else \
  419. vcpu->arch.shared->reg = cpu_to_le##size(val); \
  420. } \
  421. #define SHARED_WRAPPER(reg, size) \
  422. SHARED_WRAPPER_GET(reg, size) \
  423. SHARED_WRAPPER_SET(reg, size) \
  424. SHARED_WRAPPER(critical, 64)
  425. SHARED_WRAPPER(sprg0, 64)
  426. SHARED_WRAPPER(sprg1, 64)
  427. SHARED_WRAPPER(sprg2, 64)
  428. SHARED_WRAPPER(sprg3, 64)
  429. SHARED_WRAPPER(srr0, 64)
  430. SHARED_WRAPPER(srr1, 64)
  431. SHARED_WRAPPER(dar, 64)
  432. SHARED_WRAPPER_GET(msr, 64)
  433. static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
  434. {
  435. if (kvmppc_shared_big_endian(vcpu))
  436. vcpu->arch.shared->msr = cpu_to_be64(val);
  437. else
  438. vcpu->arch.shared->msr = cpu_to_le64(val);
  439. }
  440. SHARED_WRAPPER(dsisr, 32)
  441. SHARED_WRAPPER(int_pending, 32)
  442. SHARED_WRAPPER(sprg4, 64)
  443. SHARED_WRAPPER(sprg5, 64)
  444. SHARED_WRAPPER(sprg6, 64)
  445. SHARED_WRAPPER(sprg7, 64)
  446. static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
  447. {
  448. if (kvmppc_shared_big_endian(vcpu))
  449. return be32_to_cpu(vcpu->arch.shared->sr[nr]);
  450. else
  451. return le32_to_cpu(vcpu->arch.shared->sr[nr]);
  452. }
  453. static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
  454. {
  455. if (kvmppc_shared_big_endian(vcpu))
  456. vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
  457. else
  458. vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
  459. }
/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	/* Tell lockdep/irq tracing that interrupts are logically on again. */
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}
  479. static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
  480. {
  481. ulong ea;
  482. ulong msr_64bit = 0;
  483. ea = kvmppc_get_gpr(vcpu, rb);
  484. if (ra)
  485. ea += kvmppc_get_gpr(vcpu, ra);
  486. #if defined(CONFIG_PPC_BOOK3E_64)
  487. msr_64bit = MSR_CM;
  488. #elif defined(CONFIG_PPC_BOOK3S_64)
  489. msr_64bit = MSR_SF;
  490. #endif
  491. if (!(kvmppc_get_msr(vcpu) & msr_64bit))
  492. ea = (uint32_t)ea;
  493. return ea;
  494. }
  495. extern void xics_wake_cpu(int cpu);
  496. #endif /* __POWERPC_KVM_PPC_H__ */