emulate_loadstore.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
                kvmppc_core_queue_fpunavail(vcpu);
                return true;
        }

        return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
        if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
                kvmppc_core_queue_vsx_unavail(vcpu);
                return true;
        }

        return false;
}
#endif /* CONFIG_VSX */
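
/*
 * Both helpers above follow the same pattern: if the guest currently has
 * the facility disabled in its MSR, they queue the corresponding
 * "unavailable" interrupt for the guest and return true so the emulation
 * path can bail out.  A sketch of how the opcode cases below use them
 * (this mirrors the existing callers; nothing new is introduced):
 *
 *      if (kvmppc_check_fp_disabled(vcpu))
 *              return EMULATE_DONE;    // interrupt queued; the guest
 *                                      // re-executes once MSR_FP is set
 */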

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 inst;
        int ra, rs, rt;
        enum emulation_result emulated;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
        if (emulated != EMULATE_DONE)
                return emulated;

        ra = get_ra(inst);
        rs = get_rs(inst);
        rt = get_rt(inst);

        /*
         * if mmio_vsx_tx_sx_enabled == 0, copy data between
         * VSR[0..31] and memory
         * if mmio_vsx_tx_sx_enabled == 1, copy data between
         * VSR[32..63] and memory
         */
        vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
        vcpu->arch.mmio_vsx_copy_nums = 0;
        vcpu->arch.mmio_vsx_offset = 0;
        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
        vcpu->arch.mmio_sp64_extend = 0;
        vcpu->arch.mmio_sign_extend = 0;
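
        /*
         * Illustration (not part of the original source): for VSX
         * instructions the 5-bit T/S field can only name VSR[0..31];
         * the TX/SX instruction bit supplies the sixth register-number
         * bit, so the effective target register is, roughly,
         *
         *      vsr = (get_tx_or_sx(inst) << 5) | rt;  // TX=1, T=2 -> VSR[34]
         *
         * which is why the MMIO completion code keys off
         * mmio_vsx_tx_sx_enabled when picking the register bank.
         */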

        switch (get_op(inst)) {
        case 31:
                switch (get_xop(inst)) {
                case OP_31_XOP_LWZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;
                case OP_31_XOP_LWZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
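                /*
                 * A note on the "update" (U/UX) forms seen here and
                 * below: besides the access itself, they write the
                 * effective address back into rA.  vaddr_accessed was
                 * recorded when the guest access faulted, so the
                 * write-back is just (as each such case does):
                 *
                 *      kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 */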
                case OP_31_XOP_LBZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;
                case OP_31_XOP_LBZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_STDX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 8, 1);
                        break;
                case OP_31_XOP_STDUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 8, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_STWX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 4, 1);
                        break;
                case OP_31_XOP_STWUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 4, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_STBX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 1, 1);
                        break;
                case OP_31_XOP_STBUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_LHAX:
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;
                case OP_31_XOP_LHAUX:
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_LHZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;
                case OP_31_XOP_LHZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_STHX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 2, 1);
                        break;
                case OP_31_XOP_STHUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_DCBST:
                case OP_31_XOP_DCBF:
                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled
                         * dcache coherence. */
                        break;
                case OP_31_XOP_LWBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;
                case OP_31_XOP_STWBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 4, 0);
                        break;
                case OP_31_XOP_LHBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;
                case OP_31_XOP_STHBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 2, 0);
                        break;
                case OP_31_XOP_LDBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
                        break;
                case OP_31_XOP_STDBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 8, 0);
                        break;
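                /*
                 * For the byte-reversed forms above, the last argument
                 * to kvmppc_handle_load()/kvmppc_handle_store() is the
                 * is_default_endian flag; passing 0, e.g.
                 *
                 *      emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                 *
                 * makes the MMIO layer byte-swap relative to the guest's
                 * current endianness rather than use the default order.
                 */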
                case OP_31_XOP_LDX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
                        break;
                case OP_31_XOP_LDUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_LWAX:
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
                        break;
                case OP_31_XOP_LWAUX:
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
#ifdef CONFIG_PPC_FPU
                case OP_31_XOP_LFSX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_sp64_extend = 1;
                        emulated = kvmppc_handle_load(run, vcpu,
                                        KVM_MMIO_REG_FPR|rt, 4, 1);
                        break;
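                /*
                 * mmio_sp64_extend = 1 (here and in the other lfs/stfs
                 * variants) tells the MMIO completion path to convert
                 * the 4-byte single-precision value to the 64-bit double
                 * format held in the FPR (and back again for stores);
                 * the memory access itself stays 4 bytes wide.
                 */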
                case OP_31_XOP_LFSUX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_sp64_extend = 1;
                        emulated = kvmppc_handle_load(run, vcpu,
                                        KVM_MMIO_REG_FPR|rt, 4, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_LFDX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        emulated = kvmppc_handle_load(run, vcpu,
                                        KVM_MMIO_REG_FPR|rt, 8, 1);
                        break;
                case OP_31_XOP_LFDUX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        emulated = kvmppc_handle_load(run, vcpu,
                                        KVM_MMIO_REG_FPR|rt, 8, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_LFIWAX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        emulated = kvmppc_handle_loads(run, vcpu,
                                        KVM_MMIO_REG_FPR|rt, 4, 1);
                        break;
                case OP_31_XOP_LFIWZX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        emulated = kvmppc_handle_load(run, vcpu,
                                        KVM_MMIO_REG_FPR|rt, 4, 1);
                        break;
                case OP_31_XOP_STFSX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_sp64_extend = 1;
                        emulated = kvmppc_handle_store(run, vcpu,
                                        VCPU_FPR(vcpu, rs), 4, 1);
                        break;
                case OP_31_XOP_STFSUX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_sp64_extend = 1;
                        emulated = kvmppc_handle_store(run, vcpu,
                                        VCPU_FPR(vcpu, rs), 4, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_STFDX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        emulated = kvmppc_handle_store(run, vcpu,
                                        VCPU_FPR(vcpu, rs), 8, 1);
                        break;
                case OP_31_XOP_STFDUX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        emulated = kvmppc_handle_store(run, vcpu,
                                        VCPU_FPR(vcpu, rs), 8, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case OP_31_XOP_STFIWX:
                        if (kvmppc_check_fp_disabled(vcpu))
                                return EMULATE_DONE;
                        emulated = kvmppc_handle_store(run, vcpu,
                                        VCPU_FPR(vcpu, rs), 4, 1);
                        break;
#endif
#ifdef CONFIG_VSX
                case OP_31_XOP_LXSDX:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 1;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
                        emulated = kvmppc_handle_vsx_load(run, vcpu,
                                        KVM_MMIO_REG_VSX|rt, 8, 1, 0);
                        break;
                case OP_31_XOP_LXSSPX:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 1;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
                        vcpu->arch.mmio_sp64_extend = 1;
                        emulated = kvmppc_handle_vsx_load(run, vcpu,
                                        KVM_MMIO_REG_VSX|rt, 4, 1, 0);
                        break;
                case OP_31_XOP_LXSIWAX:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 1;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
                        emulated = kvmppc_handle_vsx_load(run, vcpu,
                                        KVM_MMIO_REG_VSX|rt, 4, 1, 1);
                        break;
                case OP_31_XOP_LXSIWZX:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 1;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
                        emulated = kvmppc_handle_vsx_load(run, vcpu,
                                        KVM_MMIO_REG_VSX|rt, 4, 1, 0);
                        break;
                case OP_31_XOP_LXVD2X:
                        /*
                         * The load/store process for this case is:
                         *
                         * Step 1: exit from the VM via the page-fault ISR;
                         * KVM saves the VSRs.  See
                         * guest_exit_cont->store_fp_state->SAVE_32VSRS
                         * for reference.
                         *
                         * Step 2: copy data between memory and the vcpu.
                         * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X we use
                         * 2 copies * 8 bytes or 4 copies * 4 bytes to
                         * simulate one 16-byte copy.  There is also an
                         * endianness issue here: note the memory layout.
                         * See the LXVD2X_ROT/STXVD2X_ROT macros for more
                         * detail.  If the host is little-endian, KVM calls
                         * XXSWAPD for LXVD2X_ROT/STXVD2X_ROT, so the
                         * positions in memory must be swapped.
                         *
                         * Step 3: return to the guest; KVM restores the
                         * registers.  See
                         * kvmppc_hv_entry->load_fp_state->REST_32VSRS
                         * for reference.
                         */
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 2;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
                        emulated = kvmppc_handle_vsx_load(run, vcpu,
                                        KVM_MMIO_REG_VSX|rt, 8, 1, 0);
                        break;
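                /*
                 * Rough picture of the splitting described above
                 * (illustration only): a 16-byte lxvd2x becomes
                 * mmio_vsx_copy_nums == 2 successive 8-byte MMIO loads,
                 *
                 *      VSR[xt].dword[0] <- MEM(EA, 8)
                 *      VSR[xt].dword[1] <- MEM(EA + 8, 8)
                 *
                 * with the doubleword order additionally swapped on
                 * little-endian hosts, as noted above.
                 */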
                case OP_31_XOP_LXVW4X:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 4;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
                        emulated = kvmppc_handle_vsx_load(run, vcpu,
                                        KVM_MMIO_REG_VSX|rt, 4, 1, 0);
                        break;
                case OP_31_XOP_LXVDSX:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 1;
                        vcpu->arch.mmio_vsx_copy_type =
                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
                        emulated = kvmppc_handle_vsx_load(run, vcpu,
                                        KVM_MMIO_REG_VSX|rt, 8, 1, 0);
                        break;
                case OP_31_XOP_STXSDX:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 1;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
                        emulated = kvmppc_handle_vsx_store(run, vcpu,
                                        rs, 8, 1);
                        break;
                case OP_31_XOP_STXSSPX:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 1;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
                        vcpu->arch.mmio_sp64_extend = 1;
                        emulated = kvmppc_handle_vsx_store(run, vcpu,
                                        rs, 4, 1);
                        break;
                case OP_31_XOP_STXSIWX:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_offset = 1;
                        vcpu->arch.mmio_vsx_copy_nums = 1;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
                        emulated = kvmppc_handle_vsx_store(run, vcpu,
                                        rs, 4, 1);
                        break;
                case OP_31_XOP_STXVD2X:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 2;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
                        emulated = kvmppc_handle_vsx_store(run, vcpu,
                                        rs, 8, 1);
                        break;
                case OP_31_XOP_STXVW4X:
                        if (kvmppc_check_vsx_disabled(vcpu))
                                return EMULATE_DONE;
                        vcpu->arch.mmio_vsx_copy_nums = 4;
                        vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
                        emulated = kvmppc_handle_vsx_store(run, vcpu,
                                        rs, 4, 1);
                        break;
#endif /* CONFIG_VSX */
                default:
                        emulated = EMULATE_FAIL;
                        break;
                }
                break;
        case OP_LWZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;
#ifdef CONFIG_PPC_FPU
        case OP_STFS:
                if (kvmppc_check_fp_disabled(vcpu))
                        return EMULATE_DONE;
                vcpu->arch.mmio_sp64_extend = 1;
                emulated = kvmppc_handle_store(run, vcpu,
                                VCPU_FPR(vcpu, rs), 4, 1);
                break;
        case OP_STFSU:
                if (kvmppc_check_fp_disabled(vcpu))
                        return EMULATE_DONE;
                vcpu->arch.mmio_sp64_extend = 1;
                emulated = kvmppc_handle_store(run, vcpu,
                                VCPU_FPR(vcpu, rs), 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
        case OP_STFD:
                if (kvmppc_check_fp_disabled(vcpu))
                        return EMULATE_DONE;
                emulated = kvmppc_handle_store(run, vcpu,
                                VCPU_FPR(vcpu, rs), 8, 1);
                break;
        case OP_STFDU:
                if (kvmppc_check_fp_disabled(vcpu))
                        return EMULATE_DONE;
                emulated = kvmppc_handle_store(run, vcpu,
                                VCPU_FPR(vcpu, rs), 8, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
#endif
        case OP_LD:
                rt = get_rt(inst);
                switch (inst & 3) {
                case 0: /* ld */
                        emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
                        break;
                case 1: /* ldu */
                        emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                case 2: /* lwa */
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
                        break;
                default:
                        emulated = EMULATE_FAIL;
                }
                break;
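        /*
         * ld, ldu and lwa all share major opcode 58 (DS-form): the
         * displacement is a word offset, so its two low bits are free
         * to act as a sub-opcode, which is what "inst & 3" extracts
         * above (0 = ld, 1 = ldu, 2 = lwa).  std/stdu under OP_STD
         * below are decoded the same way.
         */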
        case OP_LWZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
        case OP_LBZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;
        case OP_LBZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
        case OP_STW:
                emulated = kvmppc_handle_store(run, vcpu,
                                kvmppc_get_gpr(vcpu, rs), 4, 1);
                break;
        case OP_STD:
                rs = get_rs(inst);
                switch (inst & 3) {
                case 0: /* std */
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 8, 1);
                        break;
                case 1: /* stdu */
                        emulated = kvmppc_handle_store(run, vcpu,
                                        kvmppc_get_gpr(vcpu, rs), 8, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
                default:
                        emulated = EMULATE_FAIL;
                }
                break;
        case OP_STWU:
                emulated = kvmppc_handle_store(run, vcpu,
                                kvmppc_get_gpr(vcpu, rs), 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
        case OP_STB:
                emulated = kvmppc_handle_store(run, vcpu,
                                kvmppc_get_gpr(vcpu, rs), 1, 1);
                break;
        case OP_STBU:
                emulated = kvmppc_handle_store(run, vcpu,
                                kvmppc_get_gpr(vcpu, rs), 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
        case OP_LHZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;
        case OP_LHZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
        case OP_LHA:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;
        case OP_LHAU:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
        case OP_STH:
                emulated = kvmppc_handle_store(run, vcpu,
                                kvmppc_get_gpr(vcpu, rs), 2, 1);
                break;
        case OP_STHU:
                emulated = kvmppc_handle_store(run, vcpu,
                                kvmppc_get_gpr(vcpu, rs), 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
#ifdef CONFIG_PPC_FPU
        case OP_LFS:
                if (kvmppc_check_fp_disabled(vcpu))
                        return EMULATE_DONE;
                vcpu->arch.mmio_sp64_extend = 1;
                emulated = kvmppc_handle_load(run, vcpu,
                                KVM_MMIO_REG_FPR|rt, 4, 1);
                break;
        case OP_LFSU:
                if (kvmppc_check_fp_disabled(vcpu))
                        return EMULATE_DONE;
                vcpu->arch.mmio_sp64_extend = 1;
                emulated = kvmppc_handle_load(run, vcpu,
                                KVM_MMIO_REG_FPR|rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
        case OP_LFD:
                if (kvmppc_check_fp_disabled(vcpu))
                        return EMULATE_DONE;
                emulated = kvmppc_handle_load(run, vcpu,
                                KVM_MMIO_REG_FPR|rt, 8, 1);
                break;
        case OP_LFDU:
                if (kvmppc_check_fp_disabled(vcpu))
                        return EMULATE_DONE;
                emulated = kvmppc_handle_load(run, vcpu,
                                KVM_MMIO_REG_FPR|rt, 8, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
#endif
        default:
                emulated = EMULATE_FAIL;
                break;
        }

        if (emulated == EMULATE_FAIL) {
                advance = 0;
                kvmppc_core_queue_program(vcpu, 0);
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}