book3s.c

/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",  VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",     VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",          VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll",  VCPU_STAT(halt_successful_poll), },
	{ "halt_attempted_poll",   VCPU_STAT(halt_attempted_poll), },
	{ "halt_successful_wait",  VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid",     VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup",           VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",       VCPU_STAT(pthru_all) },
	{ "pthru_host",      VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
	{ NULL }
};
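
/*
 * Undo the split real mode address fixup: if the guest PC was moved up
 * into the SPLIT_HACK_OFFS window, mask the offset back out of the PC
 * and drop the hflag that marks the vcpu as running with the hack
 * applied.
 */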
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}
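
/*
 * Deliver an interrupt into the guest: save PC and MSR (plus any error
 * flags) into SRR0/SRR1, redirect the PC to the vector relative to the
 * guest's interrupt offset, and let the MMU callback recompute the MSR
 * for interrupt entry.
 */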
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;   break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;  break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;   break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;   break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;   break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;   break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;       break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;      break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;        break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;     break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;    break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;        break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;          break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;        break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;            break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;    break;
	default:    prio = BOOK3S_IRQPRIO_MAX;            break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
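
/*
 * External interrupts are queued as level-triggered when userspace
 * requests KVM_INTERRUPT_SET_LEVEL; unlike the ordinary variant, the
 * level-triggered priority stays pending until explicitly dequeued
 * (see clear_irqprio() below).
 */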
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);	/* used by kvm_hv */

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
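
/*
 * Try to deliver the interrupt for one priority. Only the decrementer
 * and external interrupts are gated on MSR_EE being set and on the
 * guest not being in a critical section; the remaining known vectors
 * are delivered unconditionally.
 */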
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}
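
/*
 * Scan the pending exception bitmap in priority order and inject the
 * first interrupt the guest can take right now; once one is delivered
 * and cleared, stop and report the remaining pending state to the
 * guest.
 */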
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
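
/*
 * Translate a guest physical address to a host pfn. Accesses to the
 * magic (paravirt) page are redirected to the kernel page backing
 * vcpu->arch.shared rather than going through the memslots.
 */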
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
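
/*
 * Translate an effective address for an instruction or data access.
 * With relocation enabled the guest MMU does the walk; otherwise the
 * mapping is 1:1 real mode, with the split real mode offset stripped
 * from instruction fetches done in MSR_DR-only (split real) mode.
 */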
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
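
/*
 * ONE_REG accessors: offer the register to the HV/PR backend first and
 * fall back to the common Book3S state below only when the backend
 * returns -EINVAL for that id.
 */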
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
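
/*
 * H_LOGICAL_CI_LOAD: emulate a cache-inhibited load by reading through
 * the in-kernel MMIO bus. Sizes that are not a power of 2 up to 8
 * bytes, or addresses that no in-kernel device claims, return
 * H_TOO_HARD so the hypercall can be completed elsewhere.
 */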
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
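
/*
 * H_LOGICAL_CI_STORE: the store-side counterpart, converting the value
 * to big endian as needed and writing it through the MMIO bus.
 */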
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
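
/*
 * IRQ routing for the in-kernel interrupt controller: set_irq requests
 * are handed to the XIVE implementation when it is active and to XICS
 * otherwise, with a flat GSI-to-pin mapping.
 */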
#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xive_enabled())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}
#endif /* CONFIG_KVM_XICS */
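
/*
 * Common module init: register with the KVM core, pull in the PR
 * backend where it is built in (32-bit), and register the device ops
 * for the in-kernel interrupt controller (XIVE when enabled, XICS
 * otherwise).
 */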
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xive_enabled()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif

	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xive_enabled())
		kvmppc_xive_exit_module();
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif