book3s.c

/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exits",                VCPU_STAT(sum_exits) },
        { "mmio",                 VCPU_STAT(mmio_exits) },
        { "sig",                  VCPU_STAT(signal_exits) },
        { "sysc",                 VCPU_STAT(syscall_exits) },
        { "inst_emu",             VCPU_STAT(emulated_inst_exits) },
        { "dec",                  VCPU_STAT(dec_exits) },
        { "ext_intr",             VCPU_STAT(ext_intr_exits) },
        { "queue_intr",           VCPU_STAT(queue_intr) },
        { "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
        { "halt_poll_fail_ns",    VCPU_STAT(halt_poll_fail_ns) },
        { "halt_wait_ns",         VCPU_STAT(halt_wait_ns) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_attempted_poll",  VCPU_STAT(halt_attempted_poll) },
        { "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
        { "halt_poll_invalid",    VCPU_STAT(halt_poll_invalid) },
        { "halt_wakeup",          VCPU_STAT(halt_wakeup) },
        { "pf_storage",           VCPU_STAT(pf_storage) },
        { "sp_storage",           VCPU_STAT(sp_storage) },
        { "pf_instruc",           VCPU_STAT(pf_instruc) },
        { "sp_instruc",           VCPU_STAT(sp_instruc) },
        { "ld",                   VCPU_STAT(ld) },
        { "ld_slow",              VCPU_STAT(ld_slow) },
        { "st",                   VCPU_STAT(st) },
        { "st_slow",              VCPU_STAT(st_slow) },
        { "pthru_all",            VCPU_STAT(pthru_all) },
        { "pthru_host",           VCPU_STAT(pthru_host) },
        { "pthru_bad_aff",        VCPU_STAT(pthru_bad_aff) },
        { NULL }
};
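
/*
 * Undo the split-real fixup: if the vcpu is running with a PC that was
 * artificially offset into the SPLIT_HACK region, restore the real PC
 * and drop the hflag before state is saved or an interrupt is injected.
 */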
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
                ulong pc = kvmppc_get_pc(vcpu);
                if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
                        kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
                vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
        }
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
        if (!is_kvmppc_hv_enabled(vcpu->kvm))
                return to_book3s(vcpu)->hior;
        return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
                        unsigned long pending_now, unsigned long old_pending)
{
        if (is_kvmppc_hv_enabled(vcpu->kvm))
                return;
        if (pending_now)
                kvmppc_set_int_pending(vcpu, 1);
        else if (old_pending)
                kvmppc_set_int_pending(vcpu, 0);
}
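
/*
 * PR KVM only: the paravirtualized guest is expected to mirror r1 into
 * the shared-page critical field around critical sections.  While
 * crit == r1 and the guest is in supervisor mode, maskable interrupt
 * delivery is held off.
 */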
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
        ulong crit_raw;
        ulong crit_r1;
        bool crit;

        if (is_kvmppc_hv_enabled(vcpu->kvm))
                return false;

        crit_raw = kvmppc_get_critical(vcpu);
        crit_r1 = kvmppc_get_gpr(vcpu, 1);

        /* Truncate crit indicators in 32 bit mode */
        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

        return crit;
}
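
/*
 * Deliver an interrupt immediately: save PC/MSR into SRR0/SRR1 (merging
 * @flags into the SRR1 status bits), jump to the vector at the guest's
 * interrupt offset, and let the MMU backend recompute the guest MSR.
 */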
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
        kvmppc_unfixup_split_real(vcpu);
        kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
        kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
        kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
        vcpu->arch.mmu.reset_msr(vcpu);
}
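
/* Map an interrupt vector address to its Book3S delivery priority. */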
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
        unsigned int prio;

        switch (vec) {
        case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;   break;
        case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;  break;
        case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;   break;
        case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;   break;
        case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;   break;
        case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;   break;
        case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;       break;
        case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
        case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;      break;
        case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;        break;
        case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;     break;
        case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;    break;
        case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;        break;
        case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;          break;
        case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;        break;
        case 0xf40: prio = BOOK3S_IRQPRIO_VSX;            break;
        case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;    break;
        default:    prio = BOOK3S_IRQPRIO_MAX;            break;
        }

        return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                                   unsigned int vec)
{
        unsigned long old_pending = vcpu->arch.pending_exceptions;

        clear_bit(kvmppc_book3s_vec2irqprio(vec),
                  &vcpu->arch.pending_exceptions);

        kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
                                  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
        vcpu->stat.queue_intr++;

        set_bit(kvmppc_book3s_vec2irqprio(vec),
                &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
        printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

        kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
                                    ulong flags)
{
        kvmppc_set_dar(vcpu, dar);
        kvmppc_set_dsisr(vcpu, flags);
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
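
/*
 * Try to deliver the interrupt at the given priority.  Decrementer and
 * external interrupts are held off while MSR_EE is clear or the guest
 * is in a critical section; the remaining priorities are delivered
 * unconditionally.  Returns 1 if the interrupt was injected.
 */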
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
                                         unsigned int priority)
{
        int deliver = 1;
        int vec = 0;
        bool crit = kvmppc_critical_section(vcpu);

        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_DECREMENTER;
                break;
        case BOOK3S_IRQPRIO_EXTERNAL:
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_EXTERNAL;
                break;
        case BOOK3S_IRQPRIO_SYSTEM_RESET:
                vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
                break;
        case BOOK3S_IRQPRIO_MACHINE_CHECK:
                vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
                break;
        case BOOK3S_IRQPRIO_DATA_STORAGE:
                vec = BOOK3S_INTERRUPT_DATA_STORAGE;
                break;
        case BOOK3S_IRQPRIO_INST_STORAGE:
                vec = BOOK3S_INTERRUPT_INST_STORAGE;
                break;
        case BOOK3S_IRQPRIO_DATA_SEGMENT:
                vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_INST_SEGMENT:
                vec = BOOK3S_INTERRUPT_INST_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_ALIGNMENT:
                vec = BOOK3S_INTERRUPT_ALIGNMENT;
                break;
        case BOOK3S_IRQPRIO_PROGRAM:
                vec = BOOK3S_INTERRUPT_PROGRAM;
                break;
        case BOOK3S_IRQPRIO_VSX:
                vec = BOOK3S_INTERRUPT_VSX;
                break;
        case BOOK3S_IRQPRIO_ALTIVEC:
                vec = BOOK3S_INTERRUPT_ALTIVEC;
                break;
        case BOOK3S_IRQPRIO_FP_UNAVAIL:
                vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
                break;
        case BOOK3S_IRQPRIO_SYSCALL:
                vec = BOOK3S_INTERRUPT_SYSCALL;
                break;
        case BOOK3S_IRQPRIO_DEBUG:
                vec = BOOK3S_INTERRUPT_TRACE;
                break;
        case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
                vec = BOOK3S_INTERRUPT_PERFMON;
                break;
        case BOOK3S_IRQPRIO_FAC_UNAVAIL:
                vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
                break;
        default:
                deliver = 0;
                printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
                break;
        }

#if 0
        printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

        if (deliver)
                kvmppc_inject_interrupt(vcpu, vec, 0);

        return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                /* DEC interrupts get cleared by mtdec */
                return false;
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                /* External interrupts get cleared by userspace */
                return false;
        }

        return true;
}
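
/*
 * Deliver at most one pending exception to the guest, scanning the
 * pending bitmap in priority order.  Priorities that stay pending until
 * cleared elsewhere (DEC, level-triggered externals) are not removed
 * from the bitmap here.
 */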
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
        unsigned int priority;

#ifdef EXIT_DEBUG
        if (vcpu->arch.pending_exceptions)
                printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
        priority = __ffs(*pending);
        while (priority < BOOK3S_IRQPRIO_MAX) {
                if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
                    clear_irqprio(vcpu, priority)) {
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
                        break;
                }

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        kvmppc_update_int_pending(vcpu, *pending, old_pending);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
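
/*
 * Translate a guest physical address to a host pfn, special-casing the
 * magic (paravirt shared) page, which is backed by the kernel page
 * holding vcpu->arch.shared rather than by guest memory.
 */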
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
                            bool *writable)
{
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
        gfn_t gfn = gpa >> PAGE_SHIFT;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        /* Magic page override */
        gpa &= ~0xFFFULL;
        if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
                ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
                kvm_pfn_t pfn;

                pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
                get_page(pfn_to_page(pfn));
                if (writable)
                        *writable = true;
                return pfn;
        }

        return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
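
/*
 * Translate an effective address for an instruction or data access.
 * With relocation on, defer to the vcpu's MMU backend; in real mode the
 * effective address maps 1:1 (masked by KVM_PAM), with a fixup for
 * split real mode instruction fetches.
 */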
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
                 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
        bool data = (xlid == XLATE_DATA);
        bool iswrite = (xlrw == XLATE_WRITE);
        int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
        int r;

        if (relocated) {
                r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
        } else {
                pte->eaddr = eaddr;
                pte->raddr = eaddr & KVM_PAM;
                pte->vpage = VSID_REAL | eaddr >> 12;
                pte->may_read = true;
                pte->may_write = true;
                pte->may_execute = true;
                r = 0;

                if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
                    !data) {
                        if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                            ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
                                pte->raddr &= ~SPLIT_HACK_MASK;
                }
        }

        return r;
}
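
/*
 * Fetch the instruction at the current PC (or, for a system call, the
 * instruction just before it).  Returns EMULATE_DONE on success or
 * EMULATE_AGAIN if the load needs to be retried.
 */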
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                          enum instruction_fetch_type type, u32 *inst)
{
        ulong pc = kvmppc_get_pc(vcpu);
        int r;

        if (type == INST_SC)
                pc -= 4;

        r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
        if (r == EMULATE_DONE)
                return r;
        else
                return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        vcpu_load(vcpu);
        ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
        vcpu_put(vcpu);

        return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        vcpu_load(vcpu);
        ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
        vcpu_put(vcpu);

        return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = kvmppc_get_pc(vcpu);
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = kvmppc_get_ctr(vcpu);
        regs->lr = kvmppc_get_lr(vcpu);
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = kvmppc_get_msr(vcpu);
        regs->srr0 = kvmppc_get_srr0(vcpu);
        regs->srr1 = kvmppc_get_srr1(vcpu);
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = kvmppc_get_sprg0(vcpu);
        regs->sprg1 = kvmppc_get_sprg1(vcpu);
        regs->sprg2 = kvmppc_get_sprg2(vcpu);
        regs->sprg3 = kvmppc_get_sprg3(vcpu);
        regs->sprg4 = kvmppc_get_sprg4(vcpu);
        regs->sprg5 = kvmppc_get_sprg5(vcpu);
        regs->sprg6 = kvmppc_get_sprg6(vcpu);
        regs->sprg7 = kvmppc_get_sprg7(vcpu);

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        kvmppc_set_pc(vcpu, regs->pc);
        kvmppc_set_cr(vcpu, regs->cr);
        kvmppc_set_ctr(vcpu, regs->ctr);
        kvmppc_set_lr(vcpu, regs->lr);
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        kvmppc_set_srr0(vcpu, regs->srr0);
        kvmppc_set_srr1(vcpu, regs->srr1);
        kvmppc_set_sprg0(vcpu, regs->sprg0);
        kvmppc_set_sprg1(vcpu, regs->sprg1);
        kvmppc_set_sprg2(vcpu, regs->sprg2);
        kvmppc_set_sprg3(vcpu, regs->sprg3);
        kvmppc_set_sprg4(vcpu, regs->sprg4);
        kvmppc_set_sprg5(vcpu, regs->sprg5);
        kvmppc_set_sprg6(vcpu, regs->sprg6);
        kvmppc_set_sprg7(vcpu, regs->sprg7);

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}
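
/*
 * ONE_REG accessors: let the HV/PR backend handle the register first;
 * only if it returns -EINVAL fall back to the generic Book3S registers
 * handled here.
 */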
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
                       union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
        if (r == -EINVAL) {
                r = 0;
                switch (id) {
                case KVM_REG_PPC_DAR:
                        *val = get_reg_val(id, kvmppc_get_dar(vcpu));
                        break;
                case KVM_REG_PPC_DSISR:
                        *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
                        break;
                case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                        i = id - KVM_REG_PPC_FPR0;
                        *val = get_reg_val(id, VCPU_FPR(vcpu, i));
                        break;
                case KVM_REG_PPC_FPSCR:
                        *val = get_reg_val(id, vcpu->arch.fp.fpscr);
                        break;
#ifdef CONFIG_VSX
                case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
                        if (cpu_has_feature(CPU_FTR_VSX)) {
                                i = id - KVM_REG_PPC_VSR0;
                                val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
                                val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
                        } else {
                                r = -ENXIO;
                        }
                        break;
#endif /* CONFIG_VSX */
                case KVM_REG_PPC_DEBUG_INST:
                        *val = get_reg_val(id, INS_TW);
                        break;
#ifdef CONFIG_KVM_XICS
                case KVM_REG_PPC_ICP_STATE:
                        if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
                                r = -ENXIO;
                                break;
                        }
                        if (xive_enabled())
                                *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
                        else
                                *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
                        break;
#endif /* CONFIG_KVM_XICS */
                case KVM_REG_PPC_FSCR:
                        *val = get_reg_val(id, vcpu->arch.fscr);
                        break;
                case KVM_REG_PPC_TAR:
                        *val = get_reg_val(id, vcpu->arch.tar);
                        break;
                case KVM_REG_PPC_EBBHR:
                        *val = get_reg_val(id, vcpu->arch.ebbhr);
                        break;
                case KVM_REG_PPC_EBBRR:
                        *val = get_reg_val(id, vcpu->arch.ebbrr);
                        break;
                case KVM_REG_PPC_BESCR:
                        *val = get_reg_val(id, vcpu->arch.bescr);
                        break;
                case KVM_REG_PPC_IC:
                        *val = get_reg_val(id, vcpu->arch.ic);
                        break;
                default:
                        r = -EINVAL;
                        break;
                }
        }

        return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
                       union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
        if (r == -EINVAL) {
                r = 0;
                switch (id) {
                case KVM_REG_PPC_DAR:
                        kvmppc_set_dar(vcpu, set_reg_val(id, *val));
                        break;
                case KVM_REG_PPC_DSISR:
                        kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
                        break;
                case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                        i = id - KVM_REG_PPC_FPR0;
                        VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_FPSCR:
                        vcpu->arch.fp.fpscr = set_reg_val(id, *val);
                        break;
#ifdef CONFIG_VSX
                case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
                        if (cpu_has_feature(CPU_FTR_VSX)) {
                                i = id - KVM_REG_PPC_VSR0;
                                vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
                                vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
                        } else {
                                r = -ENXIO;
                        }
                        break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
                case KVM_REG_PPC_ICP_STATE:
                        if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
                                r = -ENXIO;
                                break;
                        }
                        if (xive_enabled())
                                r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
                        else
                                r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
                        break;
#endif /* CONFIG_KVM_XICS */
                case KVM_REG_PPC_FSCR:
                        vcpu->arch.fscr = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_TAR:
                        vcpu->arch.tar = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_EBBHR:
                        vcpu->arch.ebbhr = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_EBBRR:
                        vcpu->arch.ebbrr = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_BESCR:
                        vcpu->arch.bescr = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_IC:
                        vcpu->arch.ic = set_reg_val(id, *val);
                        break;
                default:
                        r = -EINVAL;
                        break;
                }
        }

        return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
        vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        vcpu_load(vcpu);
        vcpu->guest_debug = dbg->control;
        vcpu_put(vcpu);
        return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
        kvmppc_core_queue_dec(vcpu);
        kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
        kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                               unsigned long npages)
{
        return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot,
                                      const struct kvm_userspace_memory_region *mem)
{
        return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      const struct kvm_userspace_memory_region *mem,
                                      const struct kvm_memory_slot *old,
                                      const struct kvm_memory_slot *new)
{
        kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

        return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
        kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
        kvmppc_rtas_tokens_free(kvm);
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
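
/*
 * H_LOGICAL_CI_LOAD: emulate a cache-inhibited load by reading from the
 * in-kernel MMIO bus and returning the byte-swapped value in gpr4.
 * Anything the kernel cannot handle is punted back with H_TOO_HARD.
 */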
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
        unsigned long size = kvmppc_get_gpr(vcpu, 4);
        unsigned long addr = kvmppc_get_gpr(vcpu, 5);
        u64 buf;
        int srcu_idx;
        int ret;

        if (!is_power_of_2(size) || (size > sizeof(buf)))
                return H_TOO_HARD;

        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
        if (ret != 0)
                return H_TOO_HARD;

        switch (size) {
        case 1:
                kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
                break;
        case 2:
                kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
                break;
        case 4:
                kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
                break;
        case 8:
                kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
                break;
        default:
                BUG();
        }

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
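
/*
 * H_LOGICAL_CI_STORE: the store-side counterpart, writing the value in
 * gpr6 to the MMIO bus in big-endian byte order.
 */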
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
        unsigned long size = kvmppc_get_gpr(vcpu, 4);
        unsigned long addr = kvmppc_get_gpr(vcpu, 5);
        unsigned long val = kvmppc_get_gpr(vcpu, 6);
        u64 buf;
        int srcu_idx;
        int ret;

        switch (size) {
        case 1:
                *(u8 *)&buf = val;
                break;
        case 2:
                *(__be16 *)&buf = cpu_to_be16(val);
                break;
        case 4:
                *(__be32 *)&buf = cpu_to_be32(val);
                break;
        case 8:
                *(__be64 *)&buf = cpu_to_be64(val);
                break;
        default:
                return H_TOO_HARD;
        }

        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
        if (ret != 0)
                return H_TOO_HARD;

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
        /*
         * We always return 0 for book3s. We check
         * for compatibility while loading the HV
         * or PR module
         */
        return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
        return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
{
        if (xive_enabled())
                return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
                                           line_status);
        else
                return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
                                           line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
                              struct kvm *kvm, int irq_source_id,
                              int level, bool line_status)
{
        return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
                           level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
                                 struct kvm *kvm, int irq_source_id, int level,
                                 bool line_status)
{
        return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
        entries->gsi = gsi;
        entries->type = KVM_IRQ_ROUTING_IRQCHIP;
        entries->set = kvmppc_book3s_set_irq;
        entries->irqchip.irqchip = 0;
        entries->irqchip.pin = gsi;
        return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        return pin;
}
#endif /* CONFIG_KVM_XICS */
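
/*
 * Module init: register with generic KVM, build the PR backend directly
 * into this module on 32-bit, and register the XICS/XIVE device ops
 * when an in-kernel interrupt controller is configured.
 */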
static int kvmppc_book3s_init(void)
{
        int r;

        r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (r)
                return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
        if (xive_enabled()) {
                kvmppc_xive_init_module();
                kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
        } else
#endif
                kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif

        return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
        if (xive_enabled())
                kvmppc_xive_exit_module();
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kvmppc_book3s_exit_pr();
#endif
        kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif