// SPDX-License-Identifier: GPL-2.0
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"

u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
	u8 ilen = 0;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* instruction only stored for these icptcodes */
		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
		/* Use the length of the EXECUTE instruction if necessary */
		if (sie_block->icptstatus & 1) {
			ilen = (sie_block->icptstatus >> 4) & 0x6;
			if (!ilen)
				ilen = 4;
		}
		break;
	case ICPT_PROGI:
		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
		break;
	}
	return ilen;
}
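
/*
 * Handle the STOP intercept: delay the stop while other interrupts are
 * pending, store the CPU status if requested, and stop the vcpu unless
 * userspace manages the cpu state itself.
 */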
static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	return -EOPNOTSUPP;
}

static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
		  current->pid, vcpu->kvm);

	/* do not warn on invalid runtime instrumentation mode */
	WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
		  viwhy);
	return -EINVAL;
}
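
/*
 * Dispatch instruction intercepts on the major opcode, i.e. the first
 * instruction byte found in bits 0-7 of the IPA field.
 */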
static int handle_instruction(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);

	switch (vcpu->arch.sie_block->ipa >> 8) {
	case 0x01:
		return kvm_s390_handle_01(vcpu);
	case 0x82:
		return kvm_s390_handle_lpsw(vcpu);
	case 0x83:
		return kvm_s390_handle_diag(vcpu);
	case 0xaa:
		return kvm_s390_handle_aa(vcpu);
	case 0xae:
		return kvm_s390_handle_sigp(vcpu);
	case 0xb2:
		return kvm_s390_handle_b2(vcpu);
	case 0xb6:
		return kvm_s390_handle_stctl(vcpu);
	case 0xb7:
		return kvm_s390_handle_lctl(vcpu);
	case 0xb9:
		return kvm_s390_handle_b9(vcpu);
	case 0xe3:
		return kvm_s390_handle_e3(vcpu);
	case 0xe5:
		return kvm_s390_handle_e5(vcpu);
	case 0xeb:
		return kvm_s390_handle_eb(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
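
/*
 * Forward an intercepted program interruption to the guest, copying the
 * exception-specific fields (translation-exception code, access ids,
 * PER data) from the SIE block into the injected pgm_info.
 */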
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};

	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_itdb *itdb;
	int rc;

	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
		return 0;
	if (current->thread.per_flags & PER_FLAG_NO_TE)
		return 0;
	itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
	if (rc)
		return rc;
	memset(itdb, 0, sizeof(*itdb));

	return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
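
/*
 * Program-interruption intercepts: filter PER events when guest debugging
 * is active, then forward the interruption to the guest.
 */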
static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
		rc = kvm_s390_handle_per_event(vcpu);
		if (rc)
			return rc;
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}

	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 *
 * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
 * the new PSW does not have external interrupts disabled. In the first case,
 * we've got to deliver the interrupt manually, and in the second case, we
 * drop to userspace to handle the situation there.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_irq irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/* We cannot handle clock comparator or timer interrupt with bad PSW */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
		/* ignore if another external call is already pending */
		if (rc == -EBUSY)
			return 0;
		return rc;
	default:
		return -EOPNOTSUPP;
	}

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Make sure that the source is paged-in */
	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
				     reg2, &srcaddr, GACC_FETCH);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Make sure that the destination is paged-in */
	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
				     reg1, &dstaddr, GACC_STORE);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

	kvm_s390_retry_instr(vcpu);

	return 0;
}
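
/* Only MVPG and SIGP partial-execution intercepts are handled in the kernel. */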
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_pei++;

	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
		return handle_mvpg_pei(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
		return kvm_s390_handle_sigp_pei(vcpu);

	return -EOPNOTSUPP;
}

/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, r = 0;
	u64 code, addr, cc = 0, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	if (addr & ~PAGE_MASK)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);

out:
	if (!cc) {
		r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
		if (r) {
			free_page((unsigned long)sctns);
			return kvm_s390_inject_prog_cond(vcpu, r);
		}
	}

	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}
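
/*
 * Operation-exception intercepts: emulate STHYI, hand instruction 0x0000
 * to userspace if it asked for that, otherwise inject an operation
 * exception into the guest.
 */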
static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
	int rc;

	vcpu->stat.exit_operation_exception++;
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

	if (vcpu->arch.sie_block->ipa == 0xb256)
		return handle_sthyi(vcpu);

	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
		return -EOPNOTSUPP;

	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/*
	 * Avoid endless loops of operation exceptions, if the pgm new
	 * PSW will cause a new operation exception.
	 * The heuristic checks if the pgm new psw is within 6 bytes before
	 * the faulting psw address (with same DAT, AS settings) and the
	 * new psw is not a wait psw and the fault was not triggered by
	 * problem state.
	 */
	oldpsw = vcpu->arch.sie_block->gpsw;
	if (oldpsw.addr - newpsw.addr <= 6 &&
	    !(newpsw.mask & PSW_MASK_WAIT) &&
	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
		return -EOPNOTSUPP;

	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
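
/*
 * Top-level intercept dispatcher: route the SIE intercept code to its
 * handler and process pending PER instruction-fetch events afterwards.
 */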
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	int rc, per_rc = 0;

	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_EXTREQ:
		vcpu->stat.exit_external_request++;
		return 0;
	case ICPT_IOREQ:
		vcpu->stat.exit_io_request++;
		return 0;
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case ICPT_STOP:
		return handle_stop(vcpu);
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		rc = kvm_s390_skey_check_enable(vcpu);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* process PER, also if the instruction is processed in user space */
	if (vcpu->arch.sie_block->icptstatus & 0x02 &&
	    (!rc || rc == -EOPNOTSUPP))
		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);

	return per_rc ? per_rc : rc;
}