book3s_emulate.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>
#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919
/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
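/*
 * Helpers that copy the guest's checkpointed register image (GPRs,
 * FP/VEC state and the SPRs that tbegin. checkpoints) between the
 * live vcpu registers and their _tm shadow copies.
 */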
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}
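/*
 * Emulate treclaim.: copy the checkpointed state back into the live
 * registers, record the failure cause in TEXASR/TFIAR if the Failure
 * Summary bit is not already set, and clear MSR[TS] so the guest
 * ends up in non-transactional state.
 */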
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	tm_enable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* failure recording depends on the Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim needs to leave the CPU in non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}
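/*
 * Emulate trecheckpoint.: flush the live register state into the vcpu
 * save area, copy it into the checkpointed (_tm) image, and re-enter
 * suspended transactional state.
 */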
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * need to flush FP/VEC/VSX to the vcpu save area before
	 * copying.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * as a result of trecheckpoint, set TS to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}
/* emulate tabort. in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* currently we only emulate tabort. and not the other
	 * tabort variants, since there is no kernel usage of them at
	 * present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	tm_enable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* failure recording depends on the Failure Summary bit,
	 * and tabort will be treated as a nop in non-transactional
	 * state.
	 */
	if (!(org_texasr & TEXASR_FS) &&
			MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	tm_disable();
	preempt_enable();
}
#endif
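/*
 * Main emulation entry point for PR KVM: called when the guest traps
 * on a privileged (or soft-patched) instruction. Decodes the opcode
 * and either emulates it, queues an interrupt for the guest, or
 * returns EMULATE_FAIL so a higher layer can deal with it.
 */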
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up
			 * executing illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * add rules to fit in ISA specification regarding TM
			 * state transition in TM disabled/Suspended state,
			 * where the target TM state is TM inactive(00)
			 * (the change should be suppressed).
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
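		/*
		 * dcbz itself doesn't trap, so the guest's dcbz instructions
		 * are patched to the reserved encoding 1010 (see the
		 * OP_31_XOP_DCBZ define above). Emulate the 32-byte cache
		 * block zeroing here and inject a data storage interrupt if
		 * the store fails.
		 */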
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
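		/*
		 * The TM instructions below are only expected to trap from a
		 * privileged guest; each case first checks that the host CPU
		 * supports TM and that the guest has MSR[TM] set, and queues
		 * a facility unavailable interrupt otherwise.
		 */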
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* only emulate for a privileged guest, since a
			 * problem-state guest can run with TM enabled and
			 * we don't expect to trap here in that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Intr */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
				!(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}
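/*
 * Decode a 32-bit write to one half of a BAT register pair into the
 * cached fields: the upper word carries BEPI, the block-length mask
 * and the Vs/Vp valid bits, the lower word carries BRPN, WIMG and PP.
 */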
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}
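/*
 * Emulate mtspr for the SPRs that PR KVM shadows in software. Writes
 * to unknown SPRs are logged and, depending on the SPR number's
 * privileged bit (0x10), turned into a privileged or illegal
 * instruction program interrupt for the guest.
 */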
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* a guest HID5 write can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
					(sprn == SPRN_TFHAR))) {
			/* it is illegal to mtspr() TM regs in
			 * other than non-transactional state, with
			 * the exception of TFHAR in suspend state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}
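/*
 * Emulate mfspr. SPRs that are shadowed in software are read from the
 * vcpu state; a handful of performance-monitor and thermal SPRs simply
 * read as zero. Unknown SPRs are handled like the mtspr case above.
 */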
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}
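/*
 * Helpers used when reflecting an alignment interrupt to the guest:
 * they reconstruct the DSISR and DAR values the hardware would have
 * provided for the faulting instruction.
 */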
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can too
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}