priv.c

/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu *cpup;
        s64 hostclk, val;
        int i, rc;
        ar_t ar;
        u64 op2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        if (store_tod_clock(&hostclk)) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
        val = (val - hostclk) & ~0x3fUL;

        mutex_lock(&vcpu->kvm->lock);
        preempt_disable();
        kvm_for_each_vcpu(i, cpup, vcpu->kvm)
                cpup->arch.sie_block->epoch = val;
        preempt_enable();
        mutex_unlock(&vcpu->kvm->lock);

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
}

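/* Handle SPX (SET PREFIX) interception */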
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        address &= 0x7fffe000u;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        if (kvm_is_error_gpa(vcpu->kvm, address))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}

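/* Handle STPX (STORE PREFIX) interception */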
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        address = kvm_s390_get_prefix(vcpu);

        /* store the value */
        rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}

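/* Handle STAP (STORE CPU ADDRESS) interception */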
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u16 vcpu_id = vcpu->vcpu_id;
        u64 ga;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (ga & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
        trace_kvm_s390_handle_stap(vcpu, ga);
        return 0;
}

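/*
 * Enable storage-key handling for the guest and drop the ISKE/SSKE/RRBE
 * intercepts once real storage keys are in use.
 */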
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
                return rc;

        rc = s390_enable_skey();
        VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
        trace_kvm_s390_skey_related_inst(vcpu);
        vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
        return rc;
}

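/* Handle ISKE, SSKE and RRBE: enable storage keys, then retry the instruction */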
static int handle_skey(struct kvm_vcpu *vcpu)
{
        int rc = __skey_check_enable(vcpu);

        if (rc)
                return rc;
        vcpu->stat.instruction_storage_key++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_rewind_psw(vcpu, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

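/* Wait until the IPTE lock is free, then retry the intercepted instruction */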
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(vcpu->arch.sie_block->gpsw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
        kvm_s390_rewind_psw(vcpu, 4);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
}

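/* Handle TB (TEST BLOCK) interception */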
static int handle_test_block(struct kvm_vcpu *vcpu)
{
        gpa_t addr;
        int reg2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
                return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        addr = kvm_s390_real_to_abs(vcpu, addr);

        if (kvm_is_error_gpa(vcpu->kvm, addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        /*
         * We don't expect errors on modern systems, and do not care
         * about storage keys (yet), so let's just clear the page.
         */
        if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
                return -EFAULT;
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
}

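/* Handle TPI (TEST PENDING INTERRUPTION) interception */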
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        unsigned long len;
        u32 tpi_data[3];
        int rc;
        u64 addr;
        ar_t ar;

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        if (!inti) {
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
        tpi_data[1] = inti->io.io_int_parm;
        tpi_data[2] = inti->io.io_int_word;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                len = sizeof(tpi_data) - 4;
                rc = write_guest(vcpu, addr, ar, &tpi_data, len);
                if (rc) {
                        rc = kvm_s390_inject_prog_cond(vcpu, rc);
                        goto reinject_interrupt;
                }
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                len = sizeof(tpi_data);
                if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
                        /* failed writes to the low core are not recoverable */
                        rc = -EFAULT;
                        goto reinject_interrupt;
                }
        }

        /* irq was successfully handed to the guest */
        kfree(inti);
        kvm_s390_set_psw_cc(vcpu, 1);
        return 0;
reinject_interrupt:
        /*
         * If we encounter a problem storing the interruption code, the
         * instruction is suppressed from the guest's view: reinject the
         * interrupt.
         */
        if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
                kfree(inti);
                rc = -EFAULT;
        }
        /* don't set the cc, a pgm irq was injected or we drop to user space */
        return rc ? -EFAULT : 0;
}

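/* Handle the interrupt-dequeue part of TSCH (TEST SUBCHANNEL), then exit to userspace */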
static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        const u64 isc_mask = 0xffUL << 24; /* all iscs set */

        /* a valid schid has at least one bit set */
        if (vcpu->run->s.regs.gprs[1])
                inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
                                           vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

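/* Common handler for intercepted channel-I/O instructions */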
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}

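/* Handle STFL (STORE FACILITY LIST) interception */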
static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;
        unsigned int fac;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        /*
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
        fac = *vcpu->kvm->arch.model.fac->list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
                return rc;
        VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
        trace_kvm_s390_handle_stfl(vcpu, fac);
        return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        if (psw->addr & 1)
                return 0;
        return 1;
}

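/* Handle LPSW (LOAD PSW) interception */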
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;
        int rc;
        ar_t ar;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

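/* Handle LPSWE (LOAD PSW EXTENDED) interception */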
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;
        int rc;
        ar_t ar;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

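/* Handle STIDP (STORE CPU ID) interception */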
static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 stidp_data = vcpu->arch.stidp_data;
        u64 operand2;
        int rc;
        ar_t ar;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
        return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        int cpus = 0;
        int n;

        cpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0 ; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

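/* Fill the kvm_run area for a userspace exit on STSI */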
static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
                                 u8 fc, u8 sel1, u16 sel2)
{
        vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
        vcpu->run->s390_stsi.addr = addr;
        vcpu->run->s390_stsi.ar = ar;
        vcpu->run->s390_stsi.fc = fc;
        vcpu->run->s390_stsi.sel1 = sel1;
        vcpu->run->s390_stsi.sel2 = sel2;
}

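/* Handle STSI (STORE SYSTEM INFORMATION) interception */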
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;
        ar_t ar;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
        if (rc) {
                rc = kvm_s390_inject_prog_cond(vcpu, rc);
                goto out;
        }
        if (vcpu->kvm->arch.user_stsi) {
                insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
                rc = -EREMOTE;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return rc;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out:
        free_page(mem);
        return rc;
}

static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x04] = handle_set_clock,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x21] = handle_ipte_interlock,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x2c] = handle_test_block,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x50] = handle_ipte_interlock,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. Here we check for
         * the privileged ones, that we can handle in the kernel.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

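/* Handle EPSW (EXTRACT PSW) interception */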
static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

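/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */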
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;
        unsigned long start, end;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!MACHINE_HAS_PFMF)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if the host supports it */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* No support for conditional-SSKE */
        if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);

        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
                break;
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
        case 0x00002000:
                /* only support 2G frame size if EDAT2 is available and we are
                   not in 24-bit addressing mode */
                if (!test_kvm_facility(vcpu->kvm, 78) ||
                    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
                break;
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                if (kvm_s390_check_low_addr_prot_real(vcpu, start))
                        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        }

        while (start < end) {
                unsigned long useraddr, abs_addr;

                /* Translate guest address to host address */
                if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
                        abs_addr = kvm_s390_real_to_abs(vcpu, start);
                else
                        abs_addr = start;
                useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
                if (kvm_is_error_hva(useraddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        int rc = __skey_check_enable(vcpu);

                        if (rc)
                                return rc;
                        if (set_guest_storage_key(current->mm, useraddr,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
                                        vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
                vcpu->run->s.regs.gprs[reg2] = end;
        return 0;
}

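/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception for CMMA */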
static int handle_essa(struct kvm_vcpu *vcpu)
{
        /* entries expected to be 1FF */
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        unsigned long *cbrlo, cbrle;
        struct gmap *gmap;
        int i;

        VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
        gmap = vcpu->arch.gmap;
        vcpu->stat.instruction_essa++;
        if (!vcpu->kvm->arch.use_cmma)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Rewind PSW to repeat the ESSA instruction */
        kvm_s390_rewind_psw(vcpu, 4);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
        for (i = 0; i < entries; ++i) {
                cbrle = cbrlo[i];
                if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
                        /* invalid entry */
                        break;
                /* try to free backing */
                __gmap_zap(gmap, cbrle);
        }
        up_read(&gmap->mm->mmap_sem);
        if (i < entries)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8a] = handle_ipte_interlock,
        [0x8d] = handle_epsw,
        [0x8e] = handle_ipte_interlock,
        [0x8f] = handle_ipte_interlock,
        [0xab] = handle_essa,
        [0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

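/* Handle LCTL (LOAD CONTROL) interception */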
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

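/* Handle STCTL (STORE CONTROL) interception */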
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_stctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

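/* Handle LCTLG (LOAD CONTROL, 64-bit) interception */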
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

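/* Handle STCTG (STORE CONTROL, 64-bit) interception */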
static int handle_stctg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        ar_t ar;

        vcpu->stat.instruction_stctg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
        [0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

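/* Handle TPROT (TEST PROTECTION) interception */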
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        unsigned long hva, gpa;
        int ret = 0, cc = 0;
        bool writable;
        ar_t ar;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

        /* we only handle the Linux memory detection case:
         * access key == 0
         * everything else goes to userspace. */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_lock(vcpu);
        ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
        if (ret == PGM_PROTECTION) {
                /* Write protected? Try again with read-only... */
                cc = 1;
                ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
        }
        if (ret) {
                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
                        ret = kvm_s390_inject_program_int(vcpu, ret);
                } else if (ret > 0) {
                        /* Translation not available */
                        kvm_s390_set_psw_cc(vcpu, 3);
                        ret = 0;
                }
                goto out_unlock;
        }

        hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
        if (kvm_is_error_hva(hva)) {
                ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                if (!writable)
                        cc = 1;         /* Write not permitted ==> read-only */
                kvm_s390_set_psw_cc(vcpu, cc);
                /* Note: CC2 only occurs for storage keys (not supported yet) */
        }
out_unlock:
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_unlock(vcpu);
        return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

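/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */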
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}