  1. /*
  2. * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
  3. *
  4. * Authors:
  5. * Alexander Graf <agraf@suse.de>
  6. * Kevin Wolf <mail@kevin-wolf.de>
  7. *
  8. * Description:
  9. * This file is derived from arch/powerpc/kvm/44x.c,
  10. * by Hollis Blanchard <hollisb@us.ibm.com>.
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License, version 2, as
  14. * published by the Free Software Foundation.
  15. */
  16. #include <linux/kvm_host.h>
  17. #include <linux/err.h>
  18. #include <linux/export.h>
  19. #include <linux/slab.h>
  20. #include <linux/module.h>
  21. #include <linux/miscdevice.h>
  22. #include <asm/reg.h>
  23. #include <asm/cputable.h>
  24. #include <asm/cacheflush.h>
  25. #include <asm/tlbflush.h>
  26. #include <asm/uaccess.h>
  27. #include <asm/io.h>
  28. #include <asm/kvm_ppc.h>
  29. #include <asm/kvm_book3s.h>
  30. #include <asm/mmu_context.h>
  31. #include <asm/page.h>
  32. #include <linux/gfp.h>
  33. #include <linux/sched.h>
  34. #include <linux/vmalloc.h>
  35. #include <linux/highmem.h>
  36. #include "book3s.h"
  37. #include "trace.h"
/* Expands to the (offset, type) pair a kvm_stats_debugfs_item expects
 * for a per-vcpu statistics field. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* #define EXIT_DEBUG */
/* Per-vcpu statistics exposed under debugfs; NULL-name entry terminates. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};
/* No host debug state to restore on Book3S — intentionally empty. */
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

/* No guest debug state to load on Book3S — intentionally empty. */
void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
  66. static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
  67. {
  68. if (!is_kvmppc_hv_enabled(vcpu->kvm))
  69. return to_book3s(vcpu)->hior;
  70. return 0;
  71. }
  72. static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
  73. unsigned long pending_now, unsigned long old_pending)
  74. {
  75. if (is_kvmppc_hv_enabled(vcpu->kvm))
  76. return;
  77. if (pending_now)
  78. kvmppc_set_int_pending(vcpu, 1);
  79. else if (old_pending)
  80. kvmppc_set_int_pending(vcpu, 0);
  81. }
  82. static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
  83. {
  84. ulong crit_raw;
  85. ulong crit_r1;
  86. bool crit;
  87. if (is_kvmppc_hv_enabled(vcpu->kvm))
  88. return false;
  89. crit_raw = kvmppc_get_critical(vcpu);
  90. crit_r1 = kvmppc_get_gpr(vcpu, 1);
  91. /* Truncate crit indicators in 32 bit mode */
  92. if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
  93. crit_raw &= 0xffffffff;
  94. crit_r1 &= 0xffffffff;
  95. }
  96. /* Critical section when crit == r1 */
  97. crit = (crit_raw == crit_r1);
  98. /* ... and we're in supervisor mode */
  99. crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
  100. return crit;
  101. }
/*
 * Deliver interrupt vector @vec to the guest immediately, emulating
 * what the hardware would do: save PC/MSR into SRR0/SRR1 (with @flags
 * ORed into SRR1), jump to the vector relative to the interrupt base,
 * and let the MMU callback derive the new MSR.
 *
 * Note: SRR0/SRR1 must be captured before PC/MSR are modified below.
 */
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}
/*
 * Map an architectural interrupt vector address to our internal
 * delivery-priority bit number. Unknown vectors map to
 * BOOK3S_IRQPRIO_MAX, which callers treat as "no priority".
 */
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;   break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;  break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;   break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;   break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;   break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;   break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;       break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;      break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;        break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;     break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;    break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;        break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;          break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;        break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;            break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;    break;
	default:    prio = BOOK3S_IRQPRIO_MAX;            break;
	}

	return prio;
}
  134. void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
  135. unsigned int vec)
  136. {
  137. unsigned long old_pending = vcpu->arch.pending_exceptions;
  138. clear_bit(kvmppc_book3s_vec2irqprio(vec),
  139. &vcpu->arch.pending_exceptions);
  140. kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
  141. old_pending);
  142. }
/*
 * Queue interrupt vector @vec for delivery at the next guest entry by
 * setting its priority bit in pending_exceptions.
 */
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
/* Inject a program check with @flags (e.g. SRR1 trap/illegal bits). */
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
/* Queue a decrementer interrupt for the next guest entry. */
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

/* Non-zero when a decrementer interrupt is currently queued. */
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

/* Retract a queued decrementer interrupt (e.g. on guest mtdec). */
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
  174. void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
  175. struct kvm_interrupt *irq)
  176. {
  177. unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
  178. if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
  179. vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
  180. kvmppc_book3s_queue_irqprio(vcpu, vec);
  181. }
/* Clear both the edge- and level-triggered external interrupt bits. */
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
/*
 * Try to deliver the interrupt with the given internal @priority.
 *
 * Maps the priority back to its architectural vector and injects it,
 * unless delivery is currently masked: decrementer and external
 * interrupts require MSR_EE and are held off while the guest is in a
 * critical section. Returns 1 if the interrupt was injected, 0 if it
 * must stay pending.
 */
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}
  259. /*
  260. * This function determines if an irqprio should be cleared once issued.
  261. */
  262. static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
  263. {
  264. switch (priority) {
  265. case BOOK3S_IRQPRIO_DECREMENTER:
  266. /* DEC interrupts get cleared by mtdec */
  267. return false;
  268. case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
  269. /* External interrupts get cleared by userspace */
  270. return false;
  271. }
  272. return true;
  273. }
/*
 * Walk the pending exceptions in priority order (lowest bit = highest
 * priority) and deliver the first one the guest can currently take.
 * One-shot interrupts are cleared once injected; the walk then stops,
 * since injecting changed guest state. Finally the shared-page
 * int_pending flag is refreshed for PR guests.
 *
 * NOTE(review): __ffs() has undefined behavior for a zero argument —
 * presumably *pending is non-zero whenever this is reached, or the
 * while-condition happens to catch the garbage value; confirm against
 * callers.
 */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
/*
 * Translate a guest frame number to a host pfn, honouring the magic
 * (hypercall shared) page: if @gfn covers the guest's magic page
 * address, return the pfn of the vcpu's in-kernel shared page instead
 * of going through the memslot lookup. Takes a page reference in the
 * magic-page case, matching gfn_to_pfn_prot()'s behavior.
 */
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	/* In 32-bit mode only the low word of the magic address is valid */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	if (unlikely(mp_pa) &&
	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
  320. static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
  321. bool iswrite, struct kvmppc_pte *pte)
  322. {
  323. int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
  324. int r;
  325. if (relocated) {
  326. r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
  327. } else {
  328. pte->eaddr = eaddr;
  329. pte->raddr = eaddr & KVM_PAM;
  330. pte->vpage = VSID_REAL | eaddr >> 12;
  331. pte->may_read = true;
  332. pte->may_write = true;
  333. pte->may_execute = true;
  334. r = 0;
  335. }
  336. return r;
  337. }
/* Sentinel host address that kvm_is_error_hva() recognizes as invalid. */
static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}
  342. static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
  343. bool read)
  344. {
  345. hva_t hpage;
  346. if (read && !pte->may_read)
  347. goto err;
  348. if (!read && !pte->may_write)
  349. goto err;
  350. hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
  351. if (kvm_is_error_hva(hpage))
  352. goto err;
  353. return hpage | (pte->raddr & ~PAGE_MASK);
  354. err:
  355. return kvmppc_bad_hva();
  356. }
/*
 * Store @size bytes from @ptr to guest effective address *@eaddr.
 * On return *@eaddr holds the translated real address.
 *
 * Returns EMULATE_DONE on success, EMULATE_DO_MMIO when the target is
 * not backed by memory, -ENOENT when translation fails and -EPERM when
 * the mapping is not writable. (Note the mix of -errno and EMULATE_*
 * values — callers must check for both.)
 */
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
		return -ENOENT;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
  372. int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
  373. bool data)
  374. {
  375. struct kvmppc_pte pte;
  376. hva_t hva = *eaddr;
  377. vcpu->stat.ld++;
  378. if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
  379. goto nopte;
  380. *eaddr = pte.raddr;
  381. hva = kvmppc_pte_to_hva(vcpu, &pte, true);
  382. if (kvm_is_error_hva(hva))
  383. goto mmio;
  384. if (copy_from_user(ptr, (void __user *)hva, size)) {
  385. printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
  386. goto mmio;
  387. }
  388. return EMULATE_DONE;
  389. nopte:
  390. return -ENOENT;
  391. mmio:
  392. return EMULATE_DO_MMIO;
  393. }
  394. EXPORT_SYMBOL_GPL(kvmppc_ld);
/* Nothing to do at generic vcpu-setup time on Book3S. */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

/* No sub-arch-independent per-vcpu init needed. */
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

/* Counterpart of kvmppc_subarch_vcpu_init() — intentionally empty. */
void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
/* KVM_GET_SREGS: forwarded to the HV/PR sub-arch implementation. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

/* KVM_SET_SREGS: forwarded to the HV/PR sub-arch implementation. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
/* KVM_GET_REGS: copy the general-purpose/special register file out. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
/* KVM_SET_REGS: load the general-purpose/special register file. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	/* May have side effects (MMU mode, pending irqs) via the sub-arch */
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
/* FPU state is accessed via ONE_REG on Book3S, not this ioctl. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

/* FPU state is accessed via ONE_REG on Book3S, not this ioctl. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
/*
 * KVM_GET_ONE_REG: read a single guest register into userspace.
 *
 * The sub-arch (HV/PR) handler gets first crack; any register it
 * rejects with -EINVAL is handled generically here. Returns 0 on
 * success, -EINVAL for unknown ids/oversized values, -ENXIO when the
 * needed CPU feature or in-kernel device is absent, -EFAULT on
 * copy-out failure.
 */
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	/* Sub-arch registers first; -EINVAL means "not mine" */
	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			/* VSCR lives in the last word of the vector */
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			/*
			 * NOTE(review): unlike the setter, no
			 * CPU_FTR_ALTIVEC check here — confirm whether
			 * that asymmetry is intended.
			 */
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST: {
			/*
			 * NOTE(review): the opcode is copied to userspace
			 * here, but the generic copy_to_user() at the end
			 * of this function then writes the uninitialized
			 * 'val' over the same address — confirm this path.
			 */
			u32 opcode = INS_TW;
			r = copy_to_user((u32 __user *)(long)reg->addr,
					 &opcode, sizeof(u32));
			break;
		}
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			val = get_reg_val(reg->id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			val = get_reg_val(reg->id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			val = get_reg_val(reg->id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			val = get_reg_val(reg->id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			val = get_reg_val(reg->id, vcpu->arch.bescr);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}
	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
/*
 * KVM_SET_ONE_REG: write a single guest register from userspace.
 *
 * Mirror of kvm_vcpu_ioctl_get_one_reg(): the value is copied in
 * first, the sub-arch handler gets first crack, and registers it
 * rejects with -EINVAL are handled generically. Returns 0 on success,
 * -EINVAL for unknown ids/oversized values, -ENXIO when the needed
 * CPU feature or device is absent, -EFAULT on copy-in failure.
 */
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	/* Sub-arch registers first; -EINVAL means "not mine" */
	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			/* VSCR lives in the last word of the vector */
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(reg->id, val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(reg->id, val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
/* Sub-arch dispatch: vcpu scheduled onto physical cpu @cpu. */
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

/* Sub-arch dispatch: vcpu descheduled from its physical cpu. */
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

/* Sub-arch dispatch: update the guest MSR (may change MMU/irq state). */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

/* Sub-arch dispatch: enter the guest and run until the next exit. */
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}
/* KVM_TRANSLATE: not implemented on Book3S; reports success unchanged. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* KVM_SET_GUEST_DEBUG: guest debugging is not supported here. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
/*
 * Decrementer timer callback: @data is the vcpu pointer smuggled
 * through the timer API. Queues a DEC interrupt and kicks the vcpu
 * out of guest mode / halt so it gets delivered.
 */
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}
/* Sub-arch dispatch: allocate and initialize vcpu @id. */
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

/* Sub-arch dispatch: tear down and free a vcpu. */
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

/* Sub-arch dispatch: handle pending vcpu requests before guest entry. */
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

/* KVM_GET_DIRTY_LOG: forwarded to the sub-arch implementation. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}
/* Sub-arch dispatch: release per-memslot metadata not shared with @dont. */
void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

/* Sub-arch dispatch: allocate per-memslot metadata for @npages pages. */
int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

/* Sub-arch dispatch: drop all guest mappings backed by @memslot. */
void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

/* Sub-arch dispatch: validate/prepare a memory-region change. */
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

/* Sub-arch dispatch: finalize a committed memory-region change. */
void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}
/* MMU-notifier hooks, all forwarded to the HV/PR sub-arch. */

/* Unmap any guest mapping of host virtual address @hva. */
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

/* Unmap guest mappings in the host VA range [start, end). */
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

/* Test-and-clear the accessed state of mappings for @hva. */
int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->age_hva(kvm, hva);
}

/* Test (without clearing) whether @hva was recently accessed. */
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

/* Propagate a changed host pte for @hva into guest mappings. */
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}
/* Sub-arch dispatch: free the vcpu's shadow-MMU state. */
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}
/*
 * Per-VM init: set up the 64-bit-only TCE-table and RTAS-token lists,
 * then hand off to the sub-arch.
 */
int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

/*
 * Per-VM teardown: sub-arch first, then the 64-bit-only RTAS tokens.
 * TCE tables must already have been released by their fd destructors.
 */
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}
  787. static int kvmppc_book3s_init(void)
  788. {
  789. int r;
  790. r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
  791. if (r)
  792. return r;
  793. #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
  794. r = kvmppc_book3s_init_pr();
  795. #endif
  796. return r;
  797. }
/* Module exit: tear down the PR backend (32-bit) then core KVM. */
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}
module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif