sigp.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458
  1. /*
  2. * handling interprocessor communication
  3. *
  4. * Copyright IBM Corp. 2008, 2013
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License (version 2 only)
  8. * as published by the Free Software Foundation.
  9. *
  10. * Author(s): Carsten Otte <cotte@de.ibm.com>
  11. * Christian Borntraeger <borntraeger@de.ibm.com>
  12. * Christian Ehrhardt <ehrhardt@de.ibm.com>
  13. */
  14. #include <linux/kvm.h>
  15. #include <linux/kvm_host.h>
  16. #include <linux/slab.h>
  17. #include <asm/sigp.h>
  18. #include "gaccess.h"
  19. #include "kvm-s390.h"
  20. #include "trace.h"
  21. static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
  22. u64 *reg)
  23. {
  24. struct kvm_s390_local_interrupt *li;
  25. struct kvm_vcpu *dst_vcpu = NULL;
  26. int cpuflags;
  27. int rc;
  28. if (cpu_addr >= KVM_MAX_VCPUS)
  29. return SIGP_CC_NOT_OPERATIONAL;
  30. dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
  31. if (!dst_vcpu)
  32. return SIGP_CC_NOT_OPERATIONAL;
  33. li = &dst_vcpu->arch.local_int;
  34. cpuflags = atomic_read(li->cpuflags);
  35. if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
  36. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  37. else {
  38. *reg &= 0xffffffff00000000UL;
  39. if (cpuflags & CPUSTAT_ECALL_PEND)
  40. *reg |= SIGP_STATUS_EXT_CALL_PENDING;
  41. if (cpuflags & CPUSTAT_STOPPED)
  42. *reg |= SIGP_STATUS_STOPPED;
  43. rc = SIGP_CC_STATUS_STORED;
  44. }
  45. VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
  46. return rc;
  47. }
  48. static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
  49. {
  50. struct kvm_s390_local_interrupt *li;
  51. struct kvm_s390_interrupt_info *inti;
  52. struct kvm_vcpu *dst_vcpu = NULL;
  53. if (cpu_addr < KVM_MAX_VCPUS)
  54. dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
  55. if (!dst_vcpu)
  56. return SIGP_CC_NOT_OPERATIONAL;
  57. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  58. if (!inti)
  59. return -ENOMEM;
  60. inti->type = KVM_S390_INT_EMERGENCY;
  61. inti->emerg.code = vcpu->vcpu_id;
  62. li = &dst_vcpu->arch.local_int;
  63. spin_lock_bh(&li->lock);
  64. list_add_tail(&inti->list, &li->list);
  65. atomic_set(&li->active, 1);
  66. atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
  67. if (waitqueue_active(li->wq))
  68. wake_up_interruptible(li->wq);
  69. spin_unlock_bh(&li->lock);
  70. VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
  71. return SIGP_CC_ORDER_CODE_ACCEPTED;
  72. }
/*
 * SIGP conditional emergency signal: deliver the emergency signal only
 * when the target CPU is in one of the architected states; otherwise
 * report "incorrect state" via the status bits in *reg.
 */
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */

	/*
	 * Deliver the emergency signal? The signal is sent when the target
	 * is stopped, or disabled for both I/O and external interruptions,
	 * or in a wait state with a nonzero PSW address, or running without
	 * @asn as its primary or secondary ASN.
	 * NOTE(review): mirrors the conditions of the architected order —
	 * confirm against the Principles of Operation description.
	 */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		/* Conditions not met: report incorrect state in bits 32-63. */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}
  101. static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
  102. {
  103. struct kvm_s390_local_interrupt *li;
  104. struct kvm_s390_interrupt_info *inti;
  105. struct kvm_vcpu *dst_vcpu = NULL;
  106. if (cpu_addr < KVM_MAX_VCPUS)
  107. dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
  108. if (!dst_vcpu)
  109. return SIGP_CC_NOT_OPERATIONAL;
  110. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  111. if (!inti)
  112. return -ENOMEM;
  113. inti->type = KVM_S390_INT_EXTERNAL_CALL;
  114. inti->extcall.code = vcpu->vcpu_id;
  115. li = &dst_vcpu->arch.local_int;
  116. spin_lock_bh(&li->lock);
  117. list_add_tail(&inti->list, &li->list);
  118. atomic_set(&li->active, 1);
  119. atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
  120. if (waitqueue_active(li->wq))
  121. wake_up_interruptible(li->wq);
  122. spin_unlock_bh(&li->lock);
  123. VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
  124. return SIGP_CC_ORDER_CODE_ACCEPTED;
  125. }
/*
 * Queue a SIGP stop interrupt on the vcpu behind @li and record the
 * requested @action bits (ACTION_STOP_ON_STOP / ACTION_STORE_ON_STOP).
 *
 * Returns SIGP_CC_ORDER_CODE_ACCEPTED on success, -ENOMEM if the
 * interrupt info cannot be allocated, or -ESHUTDOWN when the vcpu is
 * already stopped and store-on-stop was requested — the caller must
 * then store the status itself, after all spinlocks are dropped.
 */
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	/* GFP_ATOMIC: allocate before taking li->lock (bh-disabled region) */
	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		/* Already stopped: nothing to inject, drop the allocation. */
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;	/* caller stores the status */
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	/* Kick the target out of its wait, if any. */
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);
	return rc;
}
  151. static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
  152. {
  153. struct kvm_s390_local_interrupt *li;
  154. struct kvm_vcpu *dst_vcpu = NULL;
  155. int rc;
  156. if (cpu_addr >= KVM_MAX_VCPUS)
  157. return SIGP_CC_NOT_OPERATIONAL;
  158. dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
  159. if (!dst_vcpu)
  160. return SIGP_CC_NOT_OPERATIONAL;
  161. li = &dst_vcpu->arch.local_int;
  162. rc = __inject_sigp_stop(li, action);
  163. VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
  164. if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
  165. /* If the CPU has already been stopped, we still have
  166. * to save the status when doing stop-and-store. This
  167. * has to be done after unlocking all spinlocks. */
  168. rc = kvm_s390_store_status_unloaded(dst_vcpu,
  169. KVM_S390_STORE_STATUS_NOADDR);
  170. }
  171. return rc;
  172. }
  173. static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
  174. {
  175. int rc;
  176. unsigned int i;
  177. struct kvm_vcpu *v;
  178. switch (parameter & 0xff) {
  179. case 0:
  180. rc = SIGP_CC_NOT_OPERATIONAL;
  181. break;
  182. case 1:
  183. case 2:
  184. kvm_for_each_vcpu(i, v, vcpu->kvm) {
  185. v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
  186. kvm_clear_async_pf_completion_queue(v);
  187. }
  188. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  189. break;
  190. default:
  191. rc = -EOPNOTSUPP;
  192. }
  193. return rc;
  194. }
/*
 * SIGP set prefix: request that the target vcpu's prefix register be
 * set to @address. The target must be stopped and the new prefix area
 * must be backed by accessible guest memory; failures are reported via
 * the status bits in *reg with condition code "status stored".
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	/*
	 * make sure that the new value is valid memory: mask to the
	 * prefix-area boundary and probe a byte in each of its two pages
	 */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;	/* no memory: let the guest retry */

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	/* queue the set-prefix interrupt and wake the target if waiting */
	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
	return rc;
}
  240. static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
  241. u32 addr, u64 *reg)
  242. {
  243. struct kvm_vcpu *dst_vcpu = NULL;
  244. int flags;
  245. int rc;
  246. if (cpu_id < KVM_MAX_VCPUS)
  247. dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
  248. if (!dst_vcpu)
  249. return SIGP_CC_NOT_OPERATIONAL;
  250. spin_lock_bh(&dst_vcpu->arch.local_int.lock);
  251. flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
  252. spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
  253. if (!(flags & CPUSTAT_STOPPED)) {
  254. *reg &= 0xffffffff00000000UL;
  255. *reg |= SIGP_STATUS_INCORRECT_STATE;
  256. return SIGP_CC_STATUS_STORED;
  257. }
  258. addr &= 0x7ffffe00;
  259. rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
  260. if (rc == -EFAULT) {
  261. *reg &= 0xffffffff00000000UL;
  262. *reg |= SIGP_STATUS_INVALID_PARAMETER;
  263. rc = SIGP_CC_STATUS_STORED;
  264. }
  265. return rc;
  266. }
  267. static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
  268. u64 *reg)
  269. {
  270. struct kvm_s390_local_interrupt *li;
  271. struct kvm_vcpu *dst_vcpu = NULL;
  272. int rc;
  273. if (cpu_addr >= KVM_MAX_VCPUS)
  274. return SIGP_CC_NOT_OPERATIONAL;
  275. dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
  276. if (!dst_vcpu)
  277. return SIGP_CC_NOT_OPERATIONAL;
  278. li = &dst_vcpu->arch.local_int;
  279. if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
  280. /* running */
  281. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  282. } else {
  283. /* not running */
  284. *reg &= 0xffffffff00000000UL;
  285. *reg |= SIGP_STATUS_NOT_RUNNING;
  286. rc = SIGP_CC_STATUS_STORED;
  287. }
  288. VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
  289. rc);
  290. return rc;
  291. }
  292. /* Test whether the destination CPU is available and not busy */
  293. static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
  294. {
  295. struct kvm_s390_local_interrupt *li;
  296. int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  297. struct kvm_vcpu *dst_vcpu = NULL;
  298. if (cpu_addr >= KVM_MAX_VCPUS)
  299. return SIGP_CC_NOT_OPERATIONAL;
  300. dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
  301. if (!dst_vcpu)
  302. return SIGP_CC_NOT_OPERATIONAL;
  303. li = &dst_vcpu->arch.local_int;
  304. spin_lock_bh(&li->lock);
  305. if (li->action_bits & ACTION_STOP_ON_STOP)
  306. rc = SIGP_CC_BUSY;
  307. spin_unlock_bh(&li->lock);
  308. return rc;
  309. }
/*
 * Intercept handler for the SIGNAL PROCESSOR instruction.
 *
 * Decodes the register fields and order code from the intercepted
 * instruction, dispatches to the per-order handlers above, and either
 * sets the resulting condition code in the guest PSW (rc >= 0) or
 * returns a negative error — notably -EOPNOTSUPP for orders that must
 * be completed in user space.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	/* the parameter is taken from the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* rc holds the architected condition code for the guest */
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}