sigp.c 12 KB

  1. /*
  2. * handling interprocessor communication
  3. *
  4. * Copyright IBM Corp. 2008, 2013
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License (version 2 only)
  8. * as published by the Free Software Foundation.
  9. *
  10. * Author(s): Carsten Otte <cotte@de.ibm.com>
  11. * Christian Borntraeger <borntraeger@de.ibm.com>
  12. * Christian Ehrhardt <ehrhardt@de.ibm.com>
  13. */
  14. #include <linux/kvm.h>
  15. #include <linux/kvm_host.h>
  16. #include <linux/slab.h>
  17. #include <asm/sigp.h>
  18. #include "gaccess.h"
  19. #include "kvm-s390.h"
  20. #include "trace.h"
  21. static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
  22. u64 *reg)
  23. {
  24. struct kvm_s390_local_interrupt *li;
  25. int cpuflags;
  26. int rc;
  27. li = &dst_vcpu->arch.local_int;
  28. cpuflags = atomic_read(li->cpuflags);
  29. if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
  30. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  31. else {
  32. *reg &= 0xffffffff00000000UL;
  33. if (cpuflags & CPUSTAT_ECALL_PEND)
  34. *reg |= SIGP_STATUS_EXT_CALL_PENDING;
  35. if (cpuflags & CPUSTAT_STOPPED)
  36. *reg |= SIGP_STATUS_STOPPED;
  37. rc = SIGP_CC_STATUS_STORED;
  38. }
  39. VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
  40. rc);
  41. return rc;
  42. }
  43. static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
  44. struct kvm_vcpu *dst_vcpu)
  45. {
  46. struct kvm_s390_irq irq = {
  47. .type = KVM_S390_INT_EMERGENCY,
  48. .u.emerg.code = vcpu->vcpu_id,
  49. };
  50. int rc = 0;
  51. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  52. if (!rc)
  53. VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
  54. dst_vcpu->vcpu_id);
  55. return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
  56. }
  57. static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
  58. {
  59. return __inject_sigp_emergency(vcpu, dst_vcpu);
  60. }
/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: inject the emergency signal only
 * when the target's state calls for it; otherwise store
 * "incorrect state" into *reg and report SIGP_CC_STATUS_STORED.
 */
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */

	/*
	 * Inject the emergency signal if any of these holds: the target
	 * is not stopped; its PSW does not have both I/O and external
	 * interrupts enabled; it is in wait state with a non-zero PSW
	 * address; or it is not waiting and the given ASN matches its
	 * primary or secondary ASN. Otherwise report incorrect state.
	 * (This encodes the architected conditions for the order -
	 * presumably per the Principles of Operation; verify there
	 * before changing the expression.)
	 */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		/* Signal would be lost in the target's current state. */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}
  85. static int __sigp_external_call(struct kvm_vcpu *vcpu,
  86. struct kvm_vcpu *dst_vcpu)
  87. {
  88. struct kvm_s390_irq irq = {
  89. .type = KVM_S390_INT_EXTERNAL_CALL,
  90. .u.extcall.code = vcpu->vcpu_id,
  91. };
  92. int rc;
  93. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  94. if (!rc)
  95. VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
  96. dst_vcpu->vcpu_id);
  97. return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
  98. }
/*
 * Queue a stop request (optionally with status storing) on the target.
 *
 * Returns SIGP_CC_BUSY when a stop is already pending, -ESHUTDOWN when
 * the target is already stopped but ACTION_STORE_ON_STOP was requested
 * (the caller must then store the status itself, outside of any lock),
 * and SIGP_CC_ORDER_CODE_ACCEPTED otherwise.
 */
static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP) {
		/* another SIGP STOP is pending */
		rc = SIGP_CC_BUSY;
		goto out;
	}
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		/*
		 * Already stopped: nothing to inject. Tell the caller
		 * via -ESHUTDOWN if a status store was requested.
		 */
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	/* Mark the stop pending and raise the stop interrupt bit. */
	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	li->action_bits |= action;
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	/* Kick the target so it notices the pending stop. */
	kvm_s390_vcpu_wakeup(dst_vcpu);
out:
	spin_unlock(&li->lock);
	return rc;
}
  122. static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
  123. {
  124. int rc;
  125. rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
  126. VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
  127. return rc;
  128. }
  129. static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
  130. struct kvm_vcpu *dst_vcpu, u64 *reg)
  131. {
  132. int rc;
  133. rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
  134. ACTION_STORE_ON_STOP);
  135. VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
  136. dst_vcpu->vcpu_id);
  137. if (rc == -ESHUTDOWN) {
  138. /* If the CPU has already been stopped, we still have
  139. * to save the status when doing stop-and-store. This
  140. * has to be done after unlocking all spinlocks. */
  141. rc = kvm_s390_store_status_unloaded(dst_vcpu,
  142. KVM_S390_STORE_STATUS_NOADDR);
  143. }
  144. return rc;
  145. }
  146. static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
  147. {
  148. int rc;
  149. unsigned int i;
  150. struct kvm_vcpu *v;
  151. switch (parameter & 0xff) {
  152. case 0:
  153. rc = SIGP_CC_NOT_OPERATIONAL;
  154. break;
  155. case 1:
  156. case 2:
  157. kvm_for_each_vcpu(i, v, vcpu->kvm) {
  158. v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
  159. kvm_clear_async_pf_completion_queue(v);
  160. }
  161. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  162. break;
  163. default:
  164. rc = -EOPNOTSUPP;
  165. }
  166. return rc;
  167. }
/*
 * SIGP SET PREFIX: set the target CPU's prefix register.
 *
 * Stores SIGP_STATUS_INVALID_PARAMETER into *reg when the new prefix
 * does not point to valid guest memory, SIGP_STATUS_INCORRECT_STATE
 * when the target is not stopped (both with SIGP_CC_STATUS_STORED);
 * otherwise queues a set-prefix irq for the target and accepts.
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	li = &dst_vcpu->arch.local_int;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	address &= 0x7fffe000u;
	if (kvm_is_error_gpa(vcpu->kvm, address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	spin_lock(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		goto out_li;
	}

	/* Queue the new prefix and wake the target so it picks it up. */
	li->irq.prefix.address = address;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	kvm_s390_vcpu_wakeup(dst_vcpu);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
		   address);
out_li:
	spin_unlock(&li->lock);
	return rc;
}
  203. static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
  204. struct kvm_vcpu *dst_vcpu,
  205. u32 addr, u64 *reg)
  206. {
  207. int flags;
  208. int rc;
  209. spin_lock(&dst_vcpu->arch.local_int.lock);
  210. flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
  211. spin_unlock(&dst_vcpu->arch.local_int.lock);
  212. if (!(flags & CPUSTAT_STOPPED)) {
  213. *reg &= 0xffffffff00000000UL;
  214. *reg |= SIGP_STATUS_INCORRECT_STATE;
  215. return SIGP_CC_STATUS_STORED;
  216. }
  217. addr &= 0x7ffffe00;
  218. rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
  219. if (rc == -EFAULT) {
  220. *reg &= 0xffffffff00000000UL;
  221. *reg |= SIGP_STATUS_INVALID_PARAMETER;
  222. rc = SIGP_CC_STATUS_STORED;
  223. }
  224. return rc;
  225. }
  226. static int __sigp_sense_running(struct kvm_vcpu *vcpu,
  227. struct kvm_vcpu *dst_vcpu, u64 *reg)
  228. {
  229. struct kvm_s390_local_interrupt *li;
  230. int rc;
  231. li = &dst_vcpu->arch.local_int;
  232. if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
  233. /* running */
  234. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  235. } else {
  236. /* not running */
  237. *reg &= 0xffffffff00000000UL;
  238. *reg |= SIGP_STATUS_NOT_RUNNING;
  239. rc = SIGP_CC_STATUS_STORED;
  240. }
  241. VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
  242. dst_vcpu->vcpu_id, rc);
  243. return rc;
  244. }
  245. static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
  246. struct kvm_vcpu *dst_vcpu, u8 order_code)
  247. {
  248. struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
  249. /* handle (RE)START in user space */
  250. int rc = -EOPNOTSUPP;
  251. spin_lock(&li->lock);
  252. if (li->action_bits & ACTION_STOP_ON_STOP)
  253. rc = SIGP_CC_BUSY;
  254. spin_unlock(&li->lock);
  255. return rc;
  256. }
  257. static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
  258. struct kvm_vcpu *dst_vcpu, u8 order_code)
  259. {
  260. /* handle (INITIAL) CPU RESET in user space */
  261. return -EOPNOTSUPP;
  262. }
  263. static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
  264. struct kvm_vcpu *dst_vcpu)
  265. {
  266. /* handle unknown orders in user space */
  267. return -EOPNOTSUPP;
  268. }
/*
 * Dispatch a SIGP order that addresses a specific destination CPU.
 *
 * Returns a SIGP condition code (>= 0), a negative error code, or
 * -EOPNOTSUPP when the order must be completed in user space.
 */
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu;

	/* An address outside the possible VCPU range is not operational. */
	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;
	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	/* Each case bumps its stat counter, then runs the order. */
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}
/*
 * Intercept handler for the SIGP instruction.
 *
 * Decodes r1/r3 from the instruction image, runs the order (locally
 * for SET ARCHITECTURE, via handle_sigp_dst() for everything else) and
 * sets the guest's condition code from the result. Returns 0 on
 * success, a negative error code, or the result of injecting a
 * privileged-operation program interrupt when the guest executed SIGP
 * from the problem state.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	/*
	 * The parameter is taken from the odd register of the r1 pair:
	 * r1 itself when r1 is odd, r1 + 1 when r1 is even.
	 */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);

	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	default:
		/* all other orders target a specific CPU */
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	/* non-negative rc is the SIGP condition code for the guest */
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		/*
		 * NOTE(review): cpu_addr comes from a guest register.
		 * Per the comment above, a PEI should only occur for a
		 * valid target whose indicator bits are already set —
		 * confirm that guarantee before relying on this BUG_ON.
		 */
		BUG_ON(dest_vcpu == NULL);

		/* indicator bits are set; just wake the target */
		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}