/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif
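
/*
 * "Split real mode": the guest runs with instruction relocation off but
 * data relocation on (MSR_IR = 0, MSR_DR = 1). We can't execute the
 * guest that way directly, so while it is in this state we offset its
 * PC into the SPLIT_HACK region and undo that again when it leaves the
 * mode.
 */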
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

/* Undo the PC fixup applied by kvmppc_fixup_split_real() */
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);

		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
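
/*
 * vcpu_load/vcpu_put: sync the shadow SLB with this vcpu's copy and,
 * on hosts running in hypervisor mode, turn the Alternate Interrupt
 * Location (AIL) off while the guest runs so that interrupts are taken
 * at the real-mode vectors that PR KVM hooks into.
 */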
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu, svcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr = vcpu->arch.lr;
	svcpu->pc = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	/*
	 * vcpu_put would just call us again because in_use hasn't
	 * been updated yet.
	 */
	preempt_disable();

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr = svcpu->lr;
	vcpu->arch.pc = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
	svcpu->in_use = false;

out:
	preempt_enable();
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
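
/*
 * Flush the shadow PTEs of every guest page that maps into the host
 * virtual address range [start, end), on all vcpus.
 */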
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/
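
/*
 * Recompute the MSR the shadow (host) CPU uses while the guest runs:
 * keep the guest's FE0/FE1/SF/SE/BE/LE bits, force on the bits the
 * host needs (ME, RI, IR, DR, PR, EE), and pass FP/VEC/VSX through
 * only while the guest currently owns those units.
 */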
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}
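
/*
 * Handle a guest MSR update: apply the per-CPU MSR mask, block the
 * vcpu if the guest napped via MSR_POW, keep the split real mode fixup
 * in sync, and remap segments whenever the translation-related bits
 * (PR/IR/DR) change.
 */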
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around that we need to flush. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note:
	 * We assume that such a transition only happens while in kernel
	 * mode, i.e., we never transition from user 32-bit to kernel
	 * 64-bit with a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
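
/*
 * Set the guest PVR and everything derived from it: the MMU model
 * (32-bit segments vs. 64-bit SLB), HIOR, the MSR mask, and various
 * per-CPU quirk flags (dcbz size, tlbie encoding, large pages, native
 * paired singles).
 */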
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32 byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm("mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32 byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz so that we can emulate the 32 byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}
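
/*
 * Check whether a guest physical address is backed by something we can
 * map: either the magic (paravirt) page or a visible memslot.
 */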
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}
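
/*
 * Handle a guest instruction/data storage interrupt: translate the
 * effective address through the guest MMU, reflect genuine faults back
 * into the guest, map the page on the host if it is visible RAM, or
 * fall back to MMIO emulation otherwise.
 */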
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	    (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		u32 dsisr = vcpu->arch.fault_dsisr;
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64
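
/*
 * Facility (TAR/EBB/DSCR) switching is lazy: a facility stays with the
 * host until the guest traps with a facility unavailable interrupt, at
 * which point we either hand the facility to the guest, emulate the
 * access, or reflect the interrupt back into the guest.
 */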
static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	}
	vcpu->arch.fscr = fscr;
}
#endif
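
/*
 * KVM_GUESTDBG_SINGLESTEP is implemented by running the guest with
 * MSR_SE set, so each guest instruction raises a trace interrupt that
 * we can turn into a KVM_EXIT_DEBUG exit.
 */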
static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}
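
/*
 * Top-level exit handler, called with interrupts enabled on every
 * guest exit: dispatch on the interrupt vector that caused the exit
 * and decide whether to resume the guest or return to userspace.
 */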
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			msr |= shadow_srr1 & 0x58000000;
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_set_dar(vcpu, dar);
			kvmppc_set_dsisr(vcpu, fault_dsisr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		ulong flags;
		u32 last_inst;
		int emul;

program_interrupt:
		/*
		 * shadow_srr1 only contains valid flags if we came here via
		 * a program exception. The other exceptions (emulation assist,
		 * FP unavailable, etc.) do not provide flags in SRR1, so use
		 * an illegal-instruction exception when injecting a program
		 * interrupt into the guest.
		 */
		if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
			flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
		else
			flags = SRR1_PROGILL;

		emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		if (emul != EMULATE_DONE) {
			r = RESUME_GUEST;
			break;
		}

		if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
			pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
				kvmppc_get_pc(vcpu), last_inst);
#endif
			if ((last_inst & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), last_inst);
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		case EMULATE_EXIT_USER:
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
			   (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
			   (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
			   (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				goto program_interrupt;
			else
				r = RESUME_GUEST;
			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;
		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;
		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		r = RESUME_GUEST;
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}
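
/*
 * Old-style sregs interface: exposes the PVR, SDR1 and either the SLB
 * (64-bit guests) or the segment registers and BATs (32-bit guests).
 */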
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}
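
/*
 * ONE_REG accessors for the registers that need PR-specific handling:
 * the software breakpoint instruction, HIOR, and the ILE bit of the
 * LPCR (which selects the guest's interrupt endianness).
 */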
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
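
/*
 * Allocate and initialize a vcpu: the book3s struct, the shadow vcpu
 * on 32-bit hosts, the page shared with the guest, and a default PVR
 * matching the host's capabilities.
 */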
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
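
/*
 * Run-loop entry point: flush the host's FP/Altivec/VSX state, preload
 * the guest FPU if the guest has MSR_FP set, and enter the guest with
 * interrupts hard-disabled; guest register state is given back up
 * again on return.
 */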
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
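
/*
 * Report the emulated MMU geometry: a 64-entry SLB with 4k and 16M
 * segments always, plus 64k pages and 1T segments when the first
 * vcpu's PVR supports multiple page sizes.
 */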
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);
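
/*
 * The first PR VM on the system turns "relocation on exception" mode
 * off, since PR KVM relies on exceptions being delivered to the
 * real-mode vectors; the last VM to go away turns it back on.
 */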
static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * Disable KVM for POWER9 until the required bits are merged.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}
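
/*
 * The ops table through which the generic PPC KVM code drives this
 * PR (problem state) implementation.
 */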
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
#endif
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif