/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
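
/*
 * These counters are exported through KVM's generic debugfs statistics
 * interface (typically under /sys/kernel/debug/kvm/), one file per entry,
 * named after the string in the table below.
 */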
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "dcr", VCPU_STAT(dcr_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#else
	return vcpu->arch.epr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
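
	/*
	 * The "critical" field lives in the paravirt shared page; by
	 * convention the guest stores its stack pointer (r1) there while it
	 * is inside a critical section.  As long as that value still matches
	 * the current r1 and the guest is in supervisor mode, delivery of
	 * interrupt classes that honour critical sections is suppressed
	 * below (allowed && !crit).
	 */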

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
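
/*
 * Worked example of the calculation above: if TCR[WP] selects a period such
 * that 63 - period = 8, then wdt_tb = 0x100 and the watchdog event fires on
 * the next 0 -> 1 transition of that timebase bit.  If the low timebase bits
 * are currently 0x1a5 (the 0x100 bit is already set), the next transition is
 * wdt_tb + (wdt_tb - 0xa5) = 0x15b ticks away, which is then rounded up to
 * whole jiffies.
 */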

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not both set, there is no need to exit
	 * to userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);

	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);

	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
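
/*
 * Watchdog timer callback.  Each expiry advances the TSR watchdog state by
 * one step: the first expiry sets TSR[ENW], the second sets TSR[WIS] (which
 * may raise a watchdog interrupt in the guest), and an expiry with both bits
 * already set is the "final" one, which exits to userspace if TCR[WRC]
 * requests an action and the userspace watchdog has been enabled.
 */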
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required,
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after the final expiration to
	 * prevent the host from being flooded with timers if the guest sets
	 * a short period.  Timers will resume the next time TSR/TCR is
	 * updated.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU. Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.shadow_dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.shadow_dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (though not identical) to how it is called from the low-level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}
	}

	return r;
}

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;
	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];

	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
  1249. int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
  1250. {
  1251. int r = 0;
  1252. union kvmppc_one_reg val;
  1253. int size;
  1254. size = one_reg_size(reg->id);
  1255. if (size > sizeof(val))
  1256. return -EINVAL;
  1257. switch (reg->id) {
  1258. case KVM_REG_PPC_IAC1:
  1259. val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
  1260. break;
  1261. case KVM_REG_PPC_IAC2:
  1262. val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
  1263. break;
  1264. #if CONFIG_PPC_ADV_DEBUG_IACS > 2
  1265. case KVM_REG_PPC_IAC3:
  1266. val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
  1267. break;
  1268. case KVM_REG_PPC_IAC4:
  1269. val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
  1270. break;
  1271. #endif
  1272. case KVM_REG_PPC_DAC1:
  1273. val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
  1274. break;
  1275. case KVM_REG_PPC_DAC2:
  1276. val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
  1277. break;
  1278. case KVM_REG_PPC_EPR: {
  1279. u32 epr = get_guest_epr(vcpu);
  1280. val = get_reg_val(reg->id, epr);
  1281. break;
  1282. }
  1283. #if defined(CONFIG_64BIT)
  1284. case KVM_REG_PPC_EPCR:
  1285. val = get_reg_val(reg->id, vcpu->arch.epcr);
  1286. break;
  1287. #endif
  1288. case KVM_REG_PPC_TCR:
  1289. val = get_reg_val(reg->id, vcpu->arch.tcr);
  1290. break;
  1291. case KVM_REG_PPC_TSR:
  1292. val = get_reg_val(reg->id, vcpu->arch.tsr);
  1293. break;
  1294. case KVM_REG_PPC_DEBUG_INST:
  1295. val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
  1296. break;
  1297. case KVM_REG_PPC_VRSAVE:
  1298. val = get_reg_val(reg->id, vcpu->arch.vrsave);
  1299. break;
  1300. default:
  1301. r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
  1302. break;
  1303. }
  1304. if (r)
  1305. return r;
  1306. if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
  1307. r = -EFAULT;
  1308. return r;
  1309. }
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(reg->id, val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(reg->id, val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(reg->id, val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(reg->id, val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

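/*
 * Update the guest-visible EPCR.  On HV cores the shadow EPCR used while
 * the guest runs must mirror the guest's ICM choice via GICM.
 */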
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

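/*
 * A TCR write can change the watchdog configuration as well as which
 * timer interrupts are enabled, so re-arm the watchdog and re-evaluate
 * pending timer interrupts.
 */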
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

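/*
 * Set status bits in the guest TSR and kick the vcpu so the new state is
 * noticed and any resulting timer interrupt gets delivered.
 */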
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

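/*
 * Called when the emulated decrementer expires: with auto-reload
 * (TCR[ARE]) enabled, reload DEC from DECAR and restart the countdown,
 * then post TSR[DIS] so the decrementer interrupt can be delivered.
 */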
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

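/*
 * Program one hardware instruction breakpoint: place the address in a
 * free IAC register and enable the matching DBCR0 bit along with
 * internal debug mode (IDM).
 */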
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

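/*
 * Program one hardware data watchpoint in DAC1 or DAC2, enabling read
 * and/or write matching as requested by the type flags.
 */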
static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

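/*
 * On BookE-HV, MSRP marks MSR bits as hypervisor-privileged so the guest
 * cannot change them on its own; set or clear the protection bits that
 * correspond to prot_bitmap.
 */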
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

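/*
 * KVM_SET_GUEST_DEBUG: build the host-owned (shadow) debug register set
 * for single-stepping and hardware break/watchpoints, or tear it down
 * again when userspace disables guest debugging.
 */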
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		return 0;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
	/* Set DBCR0_EDM in guest visible DBCR0 register. */
	vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.shadow_dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		return 0;

	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		/* Reject any type bits we do not know about. */
		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			return -EINVAL;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				return -EINVAL;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				return -EINVAL;
		}
	}

	return 0;
}

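/*
 * Remember which physical CPU this vcpu runs on and stash the vcpu in
 * current->thread so the low-level exit path can find it.
 */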
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

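/*
 * One-time init.  Only the non-HV (PR-style) case needs work here: HV
 * cores use their own hypervisor trap vectors, so there are no handlers
 * to copy.
 */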
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Track the handler that sits at the highest IVOR offset. */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}