book3s.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042
  1. /*
  2. * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
  3. *
  4. * Authors:
  5. * Alexander Graf <agraf@suse.de>
  6. * Kevin Wolf <mail@kevin-wolf.de>
  7. *
  8. * Description:
  9. * This file is derived from arch/powerpc/kvm/44x.c,
  10. * by Hollis Blanchard <hollisb@us.ibm.com>.
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License, version 2, as
  14. * published by the Free Software Foundation.
  15. */
  16. #include <linux/kvm_host.h>
  17. #include <linux/err.h>
  18. #include <linux/export.h>
  19. #include <linux/slab.h>
  20. #include <linux/module.h>
  21. #include <linux/miscdevice.h>
  22. #include <linux/gfp.h>
  23. #include <linux/sched.h>
  24. #include <linux/vmalloc.h>
  25. #include <linux/highmem.h>
  26. #include <asm/reg.h>
  27. #include <asm/cputable.h>
  28. #include <asm/cacheflush.h>
  29. #include <linux/uaccess.h>
  30. #include <asm/io.h>
  31. #include <asm/kvm_ppc.h>
  32. #include <asm/kvm_book3s.h>
  33. #include <asm/mmu_context.h>
  34. #include <asm/page.h>
  35. #include <asm/xive.h>
  36. #include "book3s.h"
  37. #include "trace.h"
/* Expands to the member offset / type pair a kvm_stats_debugfs_item expects. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

/* Per-vcpu statistics exposed via debugfs; the { NULL } entry terminates. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",	VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",		VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",		VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
	{ "halt_successful_wait",	VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",       VCPU_STAT(pthru_all) },
	{ "pthru_host",      VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
	{ NULL }
};
  70. void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
  71. {
  72. if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
  73. ulong pc = kvmppc_get_pc(vcpu);
  74. if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
  75. kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
  76. vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
  77. }
  78. }
  79. EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
  80. static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
  81. {
  82. if (!is_kvmppc_hv_enabled(vcpu->kvm))
  83. return to_book3s(vcpu)->hior;
  84. return 0;
  85. }
  86. static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
  87. unsigned long pending_now, unsigned long old_pending)
  88. {
  89. if (is_kvmppc_hv_enabled(vcpu->kvm))
  90. return;
  91. if (pending_now)
  92. kvmppc_set_int_pending(vcpu, 1);
  93. else if (old_pending)
  94. kvmppc_set_int_pending(vcpu, 0);
  95. }
/*
 * Report whether the guest is inside a paravirtualized critical section.
 * The guest signals this by making its "critical" value equal to r1 while
 * running in supervisor mode; interrupts must then be held back.
 * Always false for HV KVM, which has no such PV protocol.
 */
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}
/*
 * Deliver interrupt vector @vec to the guest: save return state into
 * SRR0/SRR1 (SRR1 keeps MSR minus the bits in the mask, merged with
 * @flags), redirect the PC to the vector relative to the interrupt
 * offset, and let the MMU reset the MSR for interrupt entry.
 */
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}
/*
 * Map an interrupt vector address to its internal delivery priority.
 * Unknown vectors map to BOOK3S_IRQPRIO_MAX (i.e. never delivered).
 */
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}
/*
 * Retract a previously queued interrupt for vector @vec and update the
 * guest-visible pending flag if the overall pending state changed.
 */
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}
/*
 * Queue an interrupt for vector @vec by setting its priority bit in
 * pending_exceptions; actual delivery happens on the next guest entry.
 */
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
/*
 * The following helpers raise specific guest exceptions.  Program check
 * and the "facility unavailable" family are non-maskable from the
 * guest's point of view, so they are injected immediately instead of
 * being queued; the decrementer goes through the priority queue.
 */
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
/* Non-zero if a decrementer interrupt is currently queued. */
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

/* Drop a queued decrementer interrupt (e.g. after the guest wrote DEC). */
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

/*
 * Queue an external interrupt from userspace; KVM_INTERRUPT_SET_LEVEL
 * selects the level-triggered variant, which userspace must clear.
 */
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

/* Retract both the edge and level variants of the external interrupt. */
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
/*
 * Inject a data storage interrupt: DAR gets the faulting address,
 * DSISR the fault reason bits, then the DSI vector is delivered.
 */
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

/* Inject an instruction storage interrupt with @flags in SRR1. */
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
  230. static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
  231. unsigned int priority)
  232. {
  233. int deliver = 1;
  234. int vec = 0;
  235. bool crit = kvmppc_critical_section(vcpu);
  236. switch (priority) {
  237. case BOOK3S_IRQPRIO_DECREMENTER:
  238. deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
  239. vec = BOOK3S_INTERRUPT_DECREMENTER;
  240. break;
  241. case BOOK3S_IRQPRIO_EXTERNAL:
  242. case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
  243. deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
  244. vec = BOOK3S_INTERRUPT_EXTERNAL;
  245. break;
  246. case BOOK3S_IRQPRIO_SYSTEM_RESET:
  247. vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
  248. break;
  249. case BOOK3S_IRQPRIO_MACHINE_CHECK:
  250. vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
  251. break;
  252. case BOOK3S_IRQPRIO_DATA_STORAGE:
  253. vec = BOOK3S_INTERRUPT_DATA_STORAGE;
  254. break;
  255. case BOOK3S_IRQPRIO_INST_STORAGE:
  256. vec = BOOK3S_INTERRUPT_INST_STORAGE;
  257. break;
  258. case BOOK3S_IRQPRIO_DATA_SEGMENT:
  259. vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
  260. break;
  261. case BOOK3S_IRQPRIO_INST_SEGMENT:
  262. vec = BOOK3S_INTERRUPT_INST_SEGMENT;
  263. break;
  264. case BOOK3S_IRQPRIO_ALIGNMENT:
  265. vec = BOOK3S_INTERRUPT_ALIGNMENT;
  266. break;
  267. case BOOK3S_IRQPRIO_PROGRAM:
  268. vec = BOOK3S_INTERRUPT_PROGRAM;
  269. break;
  270. case BOOK3S_IRQPRIO_VSX:
  271. vec = BOOK3S_INTERRUPT_VSX;
  272. break;
  273. case BOOK3S_IRQPRIO_ALTIVEC:
  274. vec = BOOK3S_INTERRUPT_ALTIVEC;
  275. break;
  276. case BOOK3S_IRQPRIO_FP_UNAVAIL:
  277. vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
  278. break;
  279. case BOOK3S_IRQPRIO_SYSCALL:
  280. vec = BOOK3S_INTERRUPT_SYSCALL;
  281. break;
  282. case BOOK3S_IRQPRIO_DEBUG:
  283. vec = BOOK3S_INTERRUPT_TRACE;
  284. break;
  285. case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
  286. vec = BOOK3S_INTERRUPT_PERFMON;
  287. break;
  288. case BOOK3S_IRQPRIO_FAC_UNAVAIL:
  289. vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
  290. break;
  291. default:
  292. deliver = 0;
  293. printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
  294. break;
  295. }
  296. #if 0
  297. printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
  298. #endif
  299. if (deliver)
  300. kvmppc_inject_interrupt(vcpu, vec, 0);
  301. return deliver;
  302. }
  303. /*
  304. * This function determines if an irqprio should be cleared once issued.
  305. */
  306. static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
  307. {
  308. switch (priority) {
  309. case BOOK3S_IRQPRIO_DECREMENTER:
  310. /* DEC interrupts get cleared by mtdec */
  311. return false;
  312. case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
  313. /* External interrupts get cleared by userspace */
  314. return false;
  315. }
  316. return true;
  317. }
/*
 * Scan pending_exceptions from the lowest (highest-priority) bit upward
 * and deliver the first exception the guest can take; one-shot
 * exceptions are cleared once injected, and the scan stops after a
 * successful one-shot delivery.  Finally the guest-visible pending flag
 * is refreshed.
 *
 * NOTE(review): __ffs() on a zero word is undefined; this presumably
 * relies on the BOOK3S_IRQPRIO_MAX bound (or callers only invoking this
 * with work pending) to stay safe — confirm against callers.
 */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
/*
 * Translate a guest physical address to a host pfn.  If the page is the
 * guest's PV "magic page", return the pfn of the vcpu's shared page
 * (taking a page reference) instead of going through the memslots;
 * otherwise fall through to gfn_to_pfn_prot().
 */
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	/* In 32-bit mode only the low 32 bits of the magic address count. */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
/*
 * Translate an effective address for an instruction or data access.
 * With relocation on (MSR_IR/MSR_DR as appropriate) the per-vcpu MMU
 * xlate hook is used; otherwise a 1:1 real-mode mapping is synthesized
 * with full permissions, undoing the split-real PC hack for instruction
 * fetches when only MSR_DR is set.
 */
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		/* Real mode: identity map within the addressable range. */
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}
  390. int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
  391. enum instruction_fetch_type type, u32 *inst)
  392. {
  393. ulong pc = kvmppc_get_pc(vcpu);
  394. int r;
  395. if (type == INST_SC)
  396. pc -= 4;
  397. r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
  398. if (r == EMULATE_DONE)
  399. return r;
  400. else
  401. return EMULATE_AGAIN;
  402. }
  403. EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
/* Book3S needs no extra per-vcpu arch setup beyond vcpu_create. */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

/* No sub-architecture vcpu init required on Book3S. */
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

/* Counterpart of kvmppc_subarch_vcpu_init(); nothing to tear down. */
void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
/*
 * KVM_GET_SREGS: pin the vcpu and defer to the HV/PR backend's
 * get_sregs implementation.
 */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

/*
 * KVM_SET_SREGS: pin the vcpu and defer to the HV/PR backend's
 * set_sregs implementation.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}
/* KVM_GET_REGS: copy the general-purpose and special registers out. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
/* KVM_SET_REGS: mirror of get_regs — load all registers from userspace. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
/* FPU state is accessed via KVM_GET/SET_ONE_REG on Book3S, not this ioctl. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
/*
 * KVM_GET_ONE_REG: read one register identified by @id into @val.
 * The HV/PR backend is tried first; if it reports -EINVAL (register
 * not handled there) the common Book3S registers below are handled
 * here.  Returns 0 on success, -ENXIO for a facility that is absent,
 * or -EINVAL for an unknown register.
 */
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			/* Report the trap instruction used for breakpoints. */
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			/* XIVE and XICS keep their ICP state separately. */
			if (xive_enabled())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
/*
 * KVM_SET_ONE_REG: write one register identified by @id from @val.
 * Mirrors kvmppc_get_one_reg(): the backend is tried first, then the
 * common Book3S registers are handled here.
 */
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			/* XIVE and XICS restore their ICP state separately. */
			if (xive_enabled())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
/* Thin dispatchers to the active backend's (HV or PR) kvm_ops. */

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}
/* KVM_TRANSLATE is a no-op on Book3S; @tr is returned unmodified. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* KVM_SET_GUEST_DEBUG: just record the requested control flags. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}
/* Decrementer timer fired: queue the interrupt and wake the vcpu. */
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

/* Backend dispatch: create a vcpu via the active HV/PR implementation. */
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

/* Backend dispatch: free a vcpu via the active HV/PR implementation. */
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}
/*
 * The wrappers below forward memslot management and MMU-notifier
 * callbacks to the active backend's kvm_ops.
 */

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}
/*
 * VM-wide init: set up the 64-bit-only TCE-table and RTAS-token lists
 * before handing over to the backend's init_vm.
 */
int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

/*
 * VM teardown: backend first, then the RTAS tokens.  The TCE tables are
 * expected to be gone by now (warn otherwise).
 */
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
/*
 * H_LOGICAL_CI_LOAD hypercall: cache-inhibited load of 1/2/4/8 bytes
 * from guest logical address r5, result returned in r4.  The value is
 * read via the in-kernel MMIO bus (under SRCU) and byte-swapped from
 * big-endian for the multi-byte widths.  Unsupported sizes or bus
 * errors return H_TOO_HARD so the hypercall falls back to userspace.
 */
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;
	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;
	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;
	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;
	default:
		/* Unreachable: size was validated above. */
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
/*
 * H_LOGICAL_CI_STORE hypercall: cache-inhibited store of 1/2/4/8 bytes
 * of r6 to guest logical address r5, byte-swapped to big-endian for the
 * multi-byte widths and written via the in-kernel MMIO bus (under SRCU).
 * Unsupported sizes or bus errors return H_TOO_HARD.
 */
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;
	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;
	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;
	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;
	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}

/* Ask the active backend whether it implements hypercall @hcall. */
int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
#ifdef CONFIG_KVM_XICS
/*
 * Interrupt-controller plumbing: route a GSI assertion to whichever
 * in-kernel controller emulation (XIVE or XICS) is active.
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xive_enabled())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

/* Atomic-context variant used by irqfd; simply forwards to kvm_set_irq(). */
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

/* Routing-entry callback installed by kvm_irq_map_gsi() below. */
static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

/*
 * Build a default 1:1 GSI→pin routing entry (single irqchip 0);
 * returns the number of entries filled in, always 1.
 */
int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

/* With 1:1 routing the chip pin is the GSI itself. */
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */
/*
 * Module init: register with the generic KVM core, then (on 32-bit
 * builds) initialize the PR backend, and register the XICS/XIVE device
 * ops when in-kernel interrupt-controller emulation is configured.
 *
 * NOTE(review): if kvmppc_book3s_init_pr() fails, r is returned without
 * unwinding kvm_init() — presumably tolerated here; confirm upstream.
 */
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xive_enabled()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

/* Module exit: tear down in reverse order of init. */
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xive_enabled())
		kvmppc_xive_exit_module();
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif