/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

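/*
 * SIGP SENSE: report the status of the addressed CPU. Returns CC 0
 * (order accepted) if there is nothing to report; otherwise the
 * ext-call-pending and/or stopped status bits are merged into the low
 * word of *reg and CC 1 (status stored) is returned.
 */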
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                        u64 *reg)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_vcpu *dst_vcpu = NULL;
        int cpuflags;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;

        cpuflags = atomic_read(li->cpuflags);
        if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        else {
                *reg &= 0xffffffff00000000UL;
                if (cpuflags & CPUSTAT_ECALL_PEND)
                        *reg |= SIGP_STATUS_EXT_CALL_PENDING;
                if (cpuflags & CPUSTAT_STOPPED)
                        *reg |= SIGP_STATUS_STOPPED;
                rc = SIGP_CC_STATUS_STORED;
        }

        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
        return rc;
}

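/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt for the
 * addressed CPU, with the sending CPU's address as the parameter.
 */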
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_interrupt s390int = {
                .type = KVM_S390_INT_EMERGENCY,
                .parm = vcpu->vcpu_id,
        };
        struct kvm_vcpu *dst_vcpu = NULL;
        int rc = 0;

        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
        if (!rc)
                VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

        return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

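/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: like SIGP EMERGENCY SIGNAL, but the
 * signal is only delivered when the addressed CPU's state (stopped flag,
 * PSW interruption mask and wait bit, and primary/secondary ASN) meets
 * the architected conditions checked below; otherwise "incorrect state"
 * is stored and CC 1 is returned.
 */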
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                        u16 asn, u64 *reg)
{
        struct kvm_vcpu *dst_vcpu = NULL;
        const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
        u16 p_asn, s_asn;
        psw_t *psw;
        u32 flags;

        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
        psw = &dst_vcpu->arch.sie_block->gpsw;
        p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
        s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

        /* Deliver the emergency signal? */
        if (!(flags & CPUSTAT_STOPPED)
            || (psw->mask & psw_int_mask) != psw_int_mask
            || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
            || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
                return __sigp_emergency(vcpu, cpu_addr);
        } else {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }
}

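/*
 * SIGP EXTERNAL CALL: queue an external-call interrupt for the addressed
 * CPU, with the sending CPU's address as the parameter.
 */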
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_interrupt s390int = {
                .type = KVM_S390_INT_EXTERNAL_CALL,
                .parm = vcpu->vcpu_id,
        };
        struct kvm_vcpu *dst_vcpu = NULL;
        int rc;

        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
        if (!rc)
                VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

        return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

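/*
 * Queue a stop interrupt on a CPU's local interrupt structure and record
 * the requested stop action(s). If the CPU is already stopped, nothing
 * is queued; -ESHUTDOWN is returned instead when ACTION_STORE_ON_STOP
 * was requested, so that the caller can still store the status.
 */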
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
        struct kvm_s390_interrupt_info *inti;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
        if (!inti)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;

        spin_lock_bh(&li->lock);
        if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                kfree(inti);
                if ((action & ACTION_STORE_ON_STOP) != 0)
                        rc = -ESHUTDOWN;
                goto out;
        }
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
out:
        spin_unlock_bh(&li->lock);
        return rc;
}

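/*
 * SIGP STOP / SIGP STOP AND STORE STATUS: inject a stop request into the
 * addressed CPU. For stop-and-store on an already stopped CPU the status
 * is stored here instead, after all spinlocks have been dropped.
 */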
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_vcpu *dst_vcpu = NULL;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;

        rc = __inject_sigp_stop(li, action);

        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

        if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
                /* If the CPU has already been stopped, we still have
                 * to save the status when doing stop-and-store. This
                 * has to be done after unlocking all spinlocks. */
                rc = kvm_s390_store_status_unloaded(dst_vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
        }

        return rc;
}

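/*
 * SIGP SET ARCHITECTURE: an architecture mode switch (parameter 1 or 2)
 * invalidates all pfault tokens and clears the async page-fault
 * completion queues of all VCPUs; parameter 0 is reported as not
 * operational, and any other mode is rejected with -EOPNOTSUPP.
 */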
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
        int rc;
        unsigned int i;
        struct kvm_vcpu *v;

        switch (parameter & 0xff) {
        case 0:
                rc = SIGP_CC_NOT_OPERATIONAL;
                break;
        case 1:
        case 2:
                kvm_for_each_vcpu(i, v, vcpu->kvm) {
                        v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
                        kvm_clear_async_pf_completion_queue(v);
                }

                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                break;
        default:
                rc = -EOPNOTSUPP;
        }
        return rc;
}

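/*
 * SIGP SET PREFIX: queue a set-prefix interrupt for the addressed CPU.
 * The new prefix area must be backed by valid memory and the addressed
 * CPU must be in the stopped state; otherwise the status indicating the
 * failed check is stored and CC 1 is returned.
 */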
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
                             u64 *reg)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_vcpu *dst_vcpu = NULL;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        address &= 0x7fffe000u;
        if (kvm_is_error_gpa(vcpu->kvm, address)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                return SIGP_CC_STATUS_STORED;
        }

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return SIGP_CC_BUSY;

        spin_lock_bh(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                rc = SIGP_CC_STATUS_STORED;
                kfree(inti);
                goto out_li;
        }

        inti->type = KVM_S390_SIGP_SET_PREFIX;
        inti->prefix.address = address;

        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
        spin_unlock_bh(&li->lock);
        return rc;
}

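/*
 * SIGP STORE STATUS AT ADDRESS: store the addressed CPU's status at the
 * given 512-byte aligned absolute address. The addressed CPU must be
 * stopped and the address must reference valid memory.
 */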
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
                                       u32 addr, u64 *reg)
{
        struct kvm_vcpu *dst_vcpu = NULL;
        int flags;
        int rc;

        if (cpu_id < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock_bh(&dst_vcpu->arch.local_int.lock);
        flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
        spin_unlock_bh(&dst_vcpu->arch.local_int.lock);

        if (!(flags & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }

        addr &= 0x7ffffe00;
        rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
        if (rc == -EFAULT) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                rc = SIGP_CC_STATUS_STORED;
        }
        return rc;
}

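/*
 * SIGP SENSE RUNNING: report whether the addressed CPU is currently
 * running. Returns CC 0 if it is; otherwise "not running" status is
 * stored and CC 1 is returned.
 */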
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                u64 *reg)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_vcpu *dst_vcpu = NULL;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;
        if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
                /* running */
                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        } else {
                /* not running */
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_NOT_RUNNING;
                rc = SIGP_CC_STATUS_STORED;
        }

        VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
                   rc);

        return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_local_interrupt *li;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        struct kvm_vcpu *dst_vcpu = NULL;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (li->action_bits & ACTION_STOP_ON_STOP)
                rc = SIGP_CC_BUSY;
        spin_unlock_bh(&li->lock);

        return rc;
}

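/*
 * Main SIGP intercept handler: decode the order code, CPU address and
 * parameter from the instruction and guest registers, dispatch to the
 * handler for the order, and fold the result into the guest PSW
 * condition code. Negative results (including -EOPNOTSUPP for orders
 * that must be completed in userspace) are passed up unchanged.
 */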
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        u32 parameter;
        u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
        u8 order_code;
        int rc;

        /* sigp in userspace can exit */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        order_code = kvm_s390_get_base_disp_rs(vcpu);

        if (r1 % 2)
                parameter = vcpu->run->s.regs.gprs[r1];
        else
                parameter = vcpu->run->s.regs.gprs[r1 + 1];

        trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
        switch (order_code) {
        case SIGP_SENSE:
                vcpu->stat.instruction_sigp_sense++;
                rc = __sigp_sense(vcpu, cpu_addr,
                                  &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_EXTERNAL_CALL:
                vcpu->stat.instruction_sigp_external_call++;
                rc = __sigp_external_call(vcpu, cpu_addr);
                break;
        case SIGP_EMERGENCY_SIGNAL:
                vcpu->stat.instruction_sigp_emergency++;
                rc = __sigp_emergency(vcpu, cpu_addr);
                break;
        case SIGP_STOP:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
                break;
        case SIGP_STOP_AND_STORE_STATUS:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
                                                 ACTION_STOP_ON_STOP);
                break;
        case SIGP_STORE_STATUS_AT_ADDRESS:
                rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
                                                 &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_SET_ARCHITECTURE:
                vcpu->stat.instruction_sigp_arch++;
                rc = __sigp_set_arch(vcpu, parameter);
                break;
        case SIGP_SET_PREFIX:
                vcpu->stat.instruction_sigp_prefix++;
                rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
                                       &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_COND_EMERGENCY_SIGNAL:
                rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
                                                  &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_SENSE_RUNNING:
                vcpu->stat.instruction_sigp_sense_running++;
                rc = __sigp_sense_running(vcpu, cpu_addr,
                                          &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_START:
                rc = sigp_check_callable(vcpu, cpu_addr);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
                        rc = -EOPNOTSUPP;    /* Handle START in user space */
                break;
        case SIGP_RESTART:
                vcpu->stat.instruction_sigp_restart++;
                rc = sigp_check_callable(vcpu, cpu_addr);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
                        VCPU_EVENT(vcpu, 4,
                                   "sigp restart %x to handle userspace",
                                   cpu_addr);
                        /* user space must know about restart */
                        rc = -EOPNOTSUPP;
                }
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (rc < 0)
                return rc;

        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
        struct kvm_vcpu *dest_vcpu;
        u8 order_code = kvm_s390_get_base_disp_rs(vcpu);

        trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

        if (order_code == SIGP_EXTERNAL_CALL) {
                dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
                BUG_ON(dest_vcpu == NULL);

                spin_lock_bh(&dest_vcpu->arch.local_int.lock);
                if (waitqueue_active(&dest_vcpu->wq))
                        wake_up_interruptible(&dest_vcpu->wq);
                dest_vcpu->preempted = true;
                spin_unlock_bh(&dest_vcpu->arch.local_int.lock);

                kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
                return 0;
        }

        return -EOPNOTSUPP;
}