/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT       0x00dddd00
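
/*
 * Illustrative sketch (not part of this header): an instruction emulator
 * typically recognises the breakpoint instruction along these lines, where
 * "run", "vcpu" and "emulated" belong to the hypothetical caller:
 *
 *        if (inst == KVMPPC_INST_SW_BREAKPOINT) {
 *                run->exit_reason = KVM_EXIT_DEBUG;
 *                run->debug.arch.address = kvmppc_get_pc(vcpu);
 *                emulated = EMULATE_EXIT_USER;
 *        }
 */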

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
        EMULATE_FAIL,         /* can't emulate this instruction */
        EMULATE_AGAIN,        /* something went wrong. go again */
        EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_type {
        INST_GENERIC,
        INST_SC,              /* system call */
};

enum xlate_instdata {
        XLATE_INST,           /* translate instruction address */
        XLATE_DATA            /* translate data address */
};

enum xlate_readwrite {
        XLATE_READ,           /* check for read permissions */
        XLATE_WRITE           /* check for write permissions */
};
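
/*
 * A minimal sketch of how a caller acts on an emulation_result
 * (illustrative only; "r" and the exit plumbing are simplified):
 *
 *        switch (kvmppc_emulate_instruction(run, vcpu)) {
 *        case EMULATE_DONE:
 *                r = RESUME_GUEST;
 *                break;
 *        case EMULATE_DO_MMIO:
 *                run->exit_reason = KVM_EXIT_MMIO;
 *                r = RESUME_HOST;
 *                break;
 *        case EMULATE_FAIL:
 *                kvmppc_core_queue_program(vcpu, 0);
 *                r = RESUME_GUEST;
 *                break;
 *        }
 */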

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
                              int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned int rt, unsigned int bytes,
                                  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
                struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
                struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               u64 val, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   int rs, unsigned int bytes,
                                   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                                 enum instruction_type type, u32 *inst);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
                        enum xlate_instdata xlid, enum xlate_readwrite xlrw,
                        struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags,
                                           ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
                            struct kvm_memory_slot *memslot,
                            unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                                             struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                                              struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                          struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
                struct kvm *kvm, unsigned long liobn);

#define kvmppc_ioba_validate(stt, ioba, npages)                         \
                (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
                                (stt)->size, (ioba), (npages)) ?        \
                                H_PARAMETER : H_SUCCESS)
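
/*
 * Sketch of typical use, loosely modelled on an H_PUT_TCE handler
 * (simplified; error paths omitted):
 *
 *        struct kvmppc_spapr_tce_table *stt;
 *        long ret;
 *
 *        stt = kvmppc_find_table(vcpu->kvm, liobn);
 *        if (!stt)
 *                return H_TOO_HARD;
 *        ret = kvmppc_ioba_validate(stt, ioba, 1);
 *        if (ret != H_SUCCESS)
 *                return ret;
 */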

extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
                                unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
                             unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
                           unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                      unsigned long liobn, unsigned long ioba,
                                      unsigned long tce_list,
                                      unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                               unsigned long liobn, unsigned long ioba,
                               unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);

extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
                                     struct kvm_memory_slot *free,
                                     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *slot,
                                      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
                                      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
                                            struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
                                           struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
        u32     wval;
        u64     dval;
        vector128 vval;
        u64     vsxval[2];
        u32     vsx32val[4];
        struct {
                u64     addr;
                u64     length;
        }       vpaval;
};

struct kvmppc_ops {
        struct module *owner;
        int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*check_requests)(struct kvm_vcpu *vcpu);
        int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
        void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
        int (*prepare_memory_region)(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem);
        void (*commit_memory_region)(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new);
        int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
                               unsigned long end);
        int (*age_hva)(struct kvm *kvm, unsigned long start,
                       unsigned long end);
        int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
        void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
        void (*mmu_destroy)(struct kvm_vcpu *vcpu);
        void (*free_memslot)(struct kvm_memory_slot *free,
                             struct kvm_memory_slot *dont);
        int (*create_memslot)(struct kvm_memory_slot *slot,
                              unsigned long npages);
        int (*init_vm)(struct kvm *kvm);
        void (*destroy_vm)(struct kvm *kvm);
        int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
        int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int inst, int *advance);
        int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
        int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
        void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
        long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
                              unsigned long arg);
        int (*hcall_implemented)(unsigned long hcall);
        int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
                                       struct irq_bypass_producer *);
        void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
                                        struct irq_bypass_producer *);
        int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
        int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
        int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
                            unsigned long flags);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
                                       enum instruction_type type, u32 *inst)
{
        int ret = EMULATE_DONE;
        u32 fetched_inst;

        /* Load the instruction manually if it failed to do so in the
         * exit path */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

        /* Write fetch_failed unswapped if the fetch failed */
        if (ret == EMULATE_DONE)
                fetched_inst = kvmppc_need_byteswap(vcpu) ?
                                swab32(vcpu->arch.last_inst) :
                                vcpu->arch.last_inst;
        else
                fetched_inst = vcpu->arch.last_inst;

        *inst = fetched_inst;
        return ret;
}
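
/*
 * Typical use (a sketch; the surrounding emulation loop is simplified):
 *
 *        u32 inst;
 *        int emulated;
 *
 *        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *        if (emulated != EMULATE_DONE)
 *                return emulated;
 *        ... decode and emulate inst ...
 */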

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
        return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits numbered according to the Power ISA convention:
 * bit 0 is the leftmost (most significant) bit of the 64-bit value.
 * Both msb and lsb are included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = (1 << (lsb - msb + 1)) - 1;
        r = (inst >> (63 - lsb)) & mask;

        return r;
}

/*
 * Replaces inst bits numbered with the same convention as
 * kvmppc_get_field().
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
        r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

        return r;
}
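
/*
 * Worked example (illustrative): with a 32-bit instruction held in the low
 * word of the u64, field positions from the 32-bit ISA diagrams are offset
 * by 32.  The RT field of a load occupies instruction bits 6-10, so:
 *
 *        u32 rt = kvmppc_get_field(inst, 32 + 6, 32 + 10);
 *
 * computes mask = 0x1f and shifts right by 63 - 42 = 21 bits.
 */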

#define one_reg_size(id)        \
        (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)    ({              \
        union kvmppc_one_reg __u;               \
        switch (one_reg_size(id)) {             \
        case 4: __u.wval = (reg); break;        \
        case 8: __u.dval = (reg); break;        \
        default: BUG();                         \
        }                                       \
        __u;                                    \
})

#define set_reg_val(id, val)    ({              \
        u64 __v;                                \
        switch (one_reg_size(id)) {             \
        case 4: __v = (val).wval; break;        \
        case 8: __v = (val).dval; break;        \
        default: BUG();                         \
        }                                       \
        __v;                                    \
})
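
/*
 * Sketch of typical use in a get/set_one_reg implementation (the
 * surrounding switch on the register id is simplified):
 *
 *        case KVM_REG_PPC_DAR:
 *                *val = get_reg_val(id, kvmppc_get_dar(vcpu));
 *                break;
 *
 * and, for the set direction:
 *
 *        case KVM_REG_PPC_DAR:
 *                kvmppc_set_dar(vcpu, set_reg_val(id, *val));
 *                break;
 */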

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
        paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
                                        unsigned long phys_addr,
                                        void __iomem *virt_addr)
{
        paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
        paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
        u32 xirr;

        xirr = get_paca()->kvm_hstate.saved_xirr;
        get_paca()->kvm_hstate.saved_xirr = 0;
        return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
        paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
                                        unsigned long phys_addr,
                                        void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
        return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                                struct kvm *kvm)
{
        if (kvm && kvm_irq_bypass)
                return kvm->arch.pimap;
        return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                                   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                                   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
                                        struct kvmppc_irq_map *irq_map,
                                        struct kvmppc_passthru_irqmap *pimap,
                                        bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                               int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                                struct kvm *kvm)
        { return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
        { return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
        { return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
        { return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new POWER9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of an
 * interrupt on the "xics" interrupt controller on POWER8 and earlier.  These
 * two functions consume or produce a legacy "XIVE" state from the new "XIVE"
 * interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
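
/*
 * Usage sketch (illustrative, return values ignored): route guest
 * interrupt "irq" to a server at a given priority, then read it back:
 *
 *        kvmppc_xive_set_xive(kvm, irq, server, priority);
 *        kvmppc_xive_get_xive(kvm, irq, &server, &priority);
 */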
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                                  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                                  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                               int level, bool line_status);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm,
                unsigned long guest_irq,
                struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm,
                unsigned long guest_irq,
                struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
        { return -ENOENT; }
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id,
                u32 irq, int level, bool line_status) { return -ENODEV; }
#endif /* CONFIG_KVM_XIVE */

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                         unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                  unsigned long liobn, unsigned long ioba,
                                  unsigned long tce_list,
                                  unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                           unsigned long liobn, unsigned long ioba,
                           unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Host-side operations that we want to set up while running in real
 * mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
        unsigned long raw;
        struct {
                u32 in_host;
                u32 rm_action;
        };
};

struct kvmppc_host_rm_core {
        union kvmppc_rm_state rm_state;
        void *rm_data;
        char pad[112];
};

struct kvmppc_host_rm_ops {
        struct kvmppc_host_rm_core      *rm_core;
        void            (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
        return vcpu->arch.epr;
#else
        return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
        vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
                                           struct kvm_vcpu *vcpu, u32 cpu)
{
        return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
                                               struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
        struct page *page;
        /*
         * We can only access pages that the kernel maps
         * as memory. Bail out for unmapped ones.
         */
        if (!pfn_valid(pfn))
                return;

        /* Clear i-cache for new pages */
        page = pfn_to_page(pfn);
        if (!test_bit(PG_arch_1, &page->flags)) {
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        }
}
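
/*
 * PG_arch_1 serves here as an "icache already clean" marker, so a given
 * page is flushed at most once until generic mm code clears the bit again.
 * A caller simply passes the pfn of a page about to be mapped into the
 * guest, e.g. (sketch):
 *
 *        kvmppc_mmu_flush_icache(pfn);
 */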

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Only Book3S_64 PR supports bi-endian for now */
        return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
        /* Book3s_64 HV on little endian is always little endian */
        return false;
#else
        return true;
#endif
}
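
/*
 * For orientation (illustrative): the SHARED_WRAPPER(sprg4, 64) invocation
 * further below expands to an accessor pair equivalent to:
 *
 *        static inline u64 kvmppc_get_sprg4(struct kvm_vcpu *vcpu)
 *        {
 *                if (kvmppc_shared_big_endian(vcpu))
 *                        return be64_to_cpu(vcpu->arch.shared->sprg4);
 *                else
 *                        return le64_to_cpu(vcpu->arch.shared->sprg4);
 *        }
 *
 * plus the matching kvmppc_set_sprg4().
 */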

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)                             \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)             \
{                                                                       \
        return mfspr(bookehv_spr);                                      \
}                                                                       \

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)                             \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)   \
{                                                                       \
        mtspr(bookehv_spr, val);                                        \
}                                                                       \

#define SHARED_WRAPPER_GET(reg, size)                                   \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
                return be##size##_to_cpu(vcpu->arch.shared->reg);       \
        else                                                            \
                return le##size##_to_cpu(vcpu->arch.shared->reg);       \
}                                                                       \

#define SHARED_WRAPPER_SET(reg, size)                                   \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
                vcpu->arch.shared->reg = cpu_to_be##size(val);          \
        else                                                            \
                vcpu->arch.shared->reg = cpu_to_le##size(val);          \
}                                                                       \

#define SHARED_WRAPPER(reg, size)                                       \
        SHARED_WRAPPER_GET(reg, size)                                   \
        SHARED_WRAPPER_SET(reg, size)                                   \

#define SPRNG_WRAPPER(reg, bookehv_spr)                                 \
        SPRNG_WRAPPER_GET(reg, bookehv_spr)                             \
        SPRNG_WRAPPER_SET(reg, bookehv_spr)                             \

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)                    \
        SPRNG_WRAPPER(reg, bookehv_spr)                                 \

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)                    \
        SHARED_WRAPPER(reg, size)                                       \

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->msr = cpu_to_be64(val);
        else
                vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
        if (kvmppc_shared_big_endian(vcpu))
                return be32_to_cpu(vcpu->arch.shared->sr[nr]);
        else
                return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
        else
                vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after prepare_to_enter.  It puts the lazy EE and IRQ disabled
 * tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
        trace_hardirqs_on();

#ifdef CONFIG_PPC64
        /*
         * To avoid races, the caller must have gone directly from having
         * interrupts fully-enabled to hard-disabled.
         */
        WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
        irq_soft_mask_set(IRQS_ENABLED);
#endif
}
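
/*
 * Sketch of the intended call sequence on the guest-entry path
 * (simplified; error handling and the surrounding loop omitted):
 *
 *        local_irq_disable();
 *        if (kvmppc_prepare_to_enter(vcpu) > 0) {
 *                kvmppc_fix_ee_before_entry();
 *                r = __kvmppc_vcpu_run(run, vcpu);
 *        }
 */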

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
        ulong ea;
        ulong msr_64bit = 0;

        ea = kvmppc_get_gpr(vcpu, rb);
        if (ra)
                ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
        msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
        msr_64bit = MSR_SF;
#endif

        if (!(kvmppc_get_msr(vcpu) & msr_64bit))
                ea = (uint32_t)ea;

        return ea;
}
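
/*
 * Worked example (illustrative): for an X-form load such as lwzx, RA and
 * RB come from the instruction image (bits 11-15 and 16-20, offset by 32
 * as with kvmppc_get_field() above), and the "ra" check above implements
 * the usual "RA == 0 means 0, not r0" rule:
 *
 *        int ra = kvmppc_get_field(inst, 32 + 11, 32 + 15);
 *        int rb = kvmppc_get_field(inst, 32 + 16, 32 + 20);
 *        ulong ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
 *
 * In 32-bit mode (MSR_SF/MSR_CM clear) the result is truncated to 32 bits.
 */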

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */