priv.c
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu *cpup;
        s64 hostclk, val;
        int i, rc;
        u64 op2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, op2, &val, sizeof(val));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        if (store_tod_clock(&hostclk)) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
        val = (val - hostclk) & ~0x3fUL;

        mutex_lock(&vcpu->kvm->lock);
        kvm_for_each_vcpu(i, cpup, vcpu->kvm)
                cpup->arch.sie_block->epoch = val;
        mutex_unlock(&vcpu->kvm->lock);

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
}
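
/* Handle SPX (SET PREFIX) interception */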
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        rc = read_guest(vcpu, operand2, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        address &= 0x7fffe000u;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        if (kvm_is_error_gpa(vcpu->kvm, address))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}
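
/* Handle STPX (STORE PREFIX) interception */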
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        address = kvm_s390_get_prefix(vcpu);

        /* store the value */
        rc = write_guest(vcpu, operand2, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}
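
/* Handle STAP (STORE CPU ADDRESS) interception */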
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u16 vcpu_id = vcpu->vcpu_id;
        u64 ga;
        int rc;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_s(vcpu);

        if (ga & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
        trace_kvm_s390_handle_stap(vcpu, ga);
        return 0;
}
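
/*
 * Enable storage-key handling lazily: on the first storage-key related
 * instruction, let the host convert to keyed operation and stop
 * intercepting ISKE/SSKE/RRBE for this vcpu.
 */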
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
                return rc;

        rc = s390_enable_skey();
        trace_kvm_s390_skey_related_inst(vcpu);
        vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
        return rc;
}
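
/* Handle ISKE/RRBE/SSKE: enable key handling, then retry the instruction */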
static int handle_skey(struct kvm_vcpu *vcpu)
{
        int rc = __skey_check_enable(vcpu);

        if (rc)
                return rc;
        vcpu->stat.instruction_storage_key++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_rewind_psw(vcpu, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}
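
/*
 * Handle instructions that require the IPTE interlock (e.g. IPTE, IDTE,
 * CSP): wait until the lock is free, then retry the instruction.
 */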
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(vcpu->arch.sie_block->gpsw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
        kvm_s390_rewind_psw(vcpu, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
}
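
/* Handle TB (TEST BLOCK) interception */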
static int handle_test_block(struct kvm_vcpu *vcpu)
{
        gpa_t addr;
        int reg2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        if (kvm_s390_check_low_addr_protection(vcpu, addr))
                return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        addr = kvm_s390_real_to_abs(vcpu, addr);

        if (kvm_is_error_gpa(vcpu->kvm, addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        /*
         * We don't expect errors on modern systems, and do not care
         * about storage keys (yet), so let's just clear the page.
         */
        if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
                return -EFAULT;
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
}
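
/* Handle TPI (TEST PENDING INTERRUPTION) interception */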
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        unsigned long len;
        u32 tpi_data[3];
        int cc, rc;
        u64 addr;

        rc = 0;
        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        cc = 0;
        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        if (!inti)
                goto no_interrupt;

        cc = 1;
        tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
        tpi_data[1] = inti->io.io_int_parm;
        tpi_data[2] = inti->io.io_int_word;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                len = sizeof(tpi_data) - 4;
                rc = write_guest(vcpu, addr, &tpi_data, len);
                if (rc)
                        return kvm_s390_inject_prog_cond(vcpu, rc);
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                len = sizeof(tpi_data);
                if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
                        rc = -EFAULT;
        }
        /*
         * If we encounter a problem storing the interruption code, the
         * instruction is suppressed from the guest's view: reinject the
         * interrupt.
         */
        if (!rc)
                kfree(inti);
        else
                kvm_s390_reinject_io_int(vcpu->kvm, inti);
no_interrupt:
        /* Set condition code and we're done. */
        if (!rc)
                kvm_s390_set_psw_cc(vcpu, cc);
        return rc ? -EFAULT : 0;
}
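
/* Handle TSCH (TEST SUBCHANNEL) interception by exiting to userspace */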
static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kvm_s390_get_io_int(vcpu->kvm, 0,
                                   vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}
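
/* Handle STFL (STORE FACILITY LIST) interception */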
static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;
        unsigned int fac;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        /*
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
        fac = *vcpu->kvm->arch.model.fac->list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
                return rc;
        VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
        trace_kvm_s390_handle_stfl(vcpu, fac);
        return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL
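
/* Returns 1 if the given guest PSW is architecturally valid, 0 otherwise */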
int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        if (psw->addr & 1)
                return 0;
        return 1;
}
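
/* Handle LPSW (LOAD PSW) interception */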
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;
        int rc;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}
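
/* Handle LPSWE (LOAD PSW EXTENDED) interception */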
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;
        int rc;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}
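
/* Handle STIDP (STORE CPU ID) interception */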
static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 stidp_data = vcpu->arch.stidp_data;
        u64 operand2;
        int rc;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
        return 0;
}
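
/*
 * Insert this KVM instance as the topmost hypervisor into the VM list
 * of the SYSIB 3.2.2 block returned by the layer below us.
 */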
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        int cpus = 0;
        int n;

        cpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0 ; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        /* pad the 16-byte CPI field; the literal must be 16 chars long */
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}
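
/* Handle STSI (STORE SYSTEM INFORMATION) interception */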
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
        if (rc) {
                rc = kvm_s390_inject_prog_cond(vcpu, rc);
                goto out;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out:
        free_page(mem);
        return rc;
}
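
/* Dispatch table for privileged B2 instructions, indexed by the low opcode byte */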
static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x04] = handle_set_clock,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x21] = handle_ipte_interlock,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x2c] = handle_test_block,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x50] = handle_ipte_interlock,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. Here we check for
         * the privileged ones that we can handle in the kernel.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
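
/* Handle EPSW (EXTRACT PSW) interception */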
static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL
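
/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */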
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;
        unsigned long start, end;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!MACHINE_HAS_PFMF)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if the host supports it */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* No support for conditional-SSKE */
        if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);

        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
                break;
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
        /* We don't support EDAT2
        case 0x00002000:
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
                break; */
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                if (kvm_s390_check_low_addr_protection(vcpu, start))
                        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        }

        while (start < end) {
                unsigned long useraddr, abs_addr;

                /* Translate guest address to host address */
                if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
                        abs_addr = kvm_s390_real_to_abs(vcpu, start);
                else
                        abs_addr = start;
                useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
                if (kvm_is_error_hva(useraddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        int rc = __skey_check_enable(vcpu);

                        if (rc)
                                return rc;
                        if (set_guest_storage_key(current->mm, useraddr,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
                vcpu->run->s.regs.gprs[reg2] = end;
        return 0;
}
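
/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception for CMMA */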
static int handle_essa(struct kvm_vcpu *vcpu)
{
        /* entries expected to be 1FF */
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        unsigned long *cbrlo, cbrle;
        struct gmap *gmap;
        int i;

        VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
        gmap = vcpu->arch.gmap;
        vcpu->stat.instruction_essa++;
        if (!kvm_s390_cmma_enabled(vcpu->kvm))
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Rewind PSW to repeat the ESSA instruction */
        kvm_s390_rewind_psw(vcpu, 4);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
        for (i = 0; i < entries; ++i) {
                cbrle = cbrlo[i];
                if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
                        /* invalid entry */
                        break;
                /* try to free backing */
                __gmap_zap(gmap, cbrle);
        }
        up_read(&gmap->mm->mmap_sem);
        if (i < entries)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8a] = handle_ipte_interlock,
        [0x8d] = handle_epsw,
        [0x8e] = handle_ipte_interlock,
        [0x8f] = handle_ipte_interlock,
        [0xab] = handle_essa,
        [0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
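
/* Handle LCTL (LOAD CONTROL) interception */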
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}
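
/* Handle STCTL (STORE CONTROL) interception */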
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;

        vcpu->stat.instruction_stctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
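
/* Handle LCTLG (LOAD CONTROL, 64 bit) interception */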
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}
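
/* Handle STCTG (STORE CONTROL, 64 bit) interception */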
static int handle_stctg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;

        vcpu->stat.instruction_stctg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
        [0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}
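
/* Handle TPROT (TEST PROTECTION) interception */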
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        unsigned long hva, gpa;
        int ret = 0, cc = 0;
        bool writable;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /*
         * We only handle the Linux memory detection case:
         * access key == 0
         * everything else goes to userspace.
         */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_lock(vcpu);
        ret = guest_translate_address(vcpu, address1, &gpa, 1);
        if (ret == PGM_PROTECTION) {
                /* Write protected? Try again with read-only... */
                cc = 1;
                ret = guest_translate_address(vcpu, address1, &gpa, 0);
        }
        if (ret) {
                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
                        ret = kvm_s390_inject_program_int(vcpu, ret);
                } else if (ret > 0) {
                        /* Translation not available */
                        kvm_s390_set_psw_cc(vcpu, 3);
                        ret = 0;
                }
                goto out_unlock;
        }

        hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
        if (kvm_is_error_hva(hva)) {
                ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                if (!writable)
                        cc = 1; /* Write not permitted ==> read-only */
                kvm_s390_set_psw_cc(vcpu, cc);
                /* Note: CC2 only occurs for storage keys (not supported yet) */
        }
out_unlock:
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_unlock(vcpu);
        return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}
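
/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */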
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}