/* sigp.c */
  1. /*
  2. * handling interprocessor communication
  3. *
  4. * Copyright IBM Corp. 2008, 2013
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License (version 2 only)
  8. * as published by the Free Software Foundation.
  9. *
  10. * Author(s): Carsten Otte <cotte@de.ibm.com>
  11. * Christian Borntraeger <borntraeger@de.ibm.com>
  12. * Christian Ehrhardt <ehrhardt@de.ibm.com>
  13. */
  14. #include <linux/kvm.h>
  15. #include <linux/kvm_host.h>
  16. #include <linux/slab.h>
  17. #include <asm/sigp.h>
  18. #include "gaccess.h"
  19. #include "kvm-s390.h"
  20. #include "trace.h"
  21. static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
  22. u64 *reg)
  23. {
  24. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  25. int rc;
  26. if (cpu_addr >= KVM_MAX_VCPUS)
  27. return SIGP_CC_NOT_OPERATIONAL;
  28. spin_lock(&fi->lock);
  29. if (fi->local_int[cpu_addr] == NULL)
  30. rc = SIGP_CC_NOT_OPERATIONAL;
  31. else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
  32. & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
  33. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  34. else {
  35. *reg &= 0xffffffff00000000UL;
  36. if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
  37. & CPUSTAT_ECALL_PEND)
  38. *reg |= SIGP_STATUS_EXT_CALL_PENDING;
  39. if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
  40. & CPUSTAT_STOPPED)
  41. *reg |= SIGP_STATUS_STOPPED;
  42. rc = SIGP_CC_STATUS_STORED;
  43. }
  44. spin_unlock(&fi->lock);
  45. VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
  46. return rc;
  47. }
  48. static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
  49. {
  50. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  51. struct kvm_s390_local_interrupt *li;
  52. struct kvm_s390_interrupt_info *inti;
  53. int rc;
  54. if (cpu_addr >= KVM_MAX_VCPUS)
  55. return SIGP_CC_NOT_OPERATIONAL;
  56. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  57. if (!inti)
  58. return -ENOMEM;
  59. inti->type = KVM_S390_INT_EMERGENCY;
  60. inti->emerg.code = vcpu->vcpu_id;
  61. spin_lock(&fi->lock);
  62. li = fi->local_int[cpu_addr];
  63. if (li == NULL) {
  64. rc = SIGP_CC_NOT_OPERATIONAL;
  65. kfree(inti);
  66. goto unlock;
  67. }
  68. spin_lock_bh(&li->lock);
  69. list_add_tail(&inti->list, &li->list);
  70. atomic_set(&li->active, 1);
  71. atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
  72. if (waitqueue_active(li->wq))
  73. wake_up_interruptible(li->wq);
  74. spin_unlock_bh(&li->lock);
  75. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  76. VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
  77. unlock:
  78. spin_unlock(&fi->lock);
  79. return rc;
  80. }
/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver an emergency signal to the
 * addressed CPU only when its current state meets the architected
 * condition; otherwise report "incorrect state" to the caller.
 *
 * @vcpu:     sending vcpu
 * @cpu_addr: address of the destination vcpu
 * @asn:      address-space number taken from the SIGP parameter
 * @reg:      caller's register receiving the status bits on CC 1
 *
 * Returns a SIGP condition code, or the result of __sigp_emergency().
 */
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	/* Snapshot target state; cpuflags is read without the local lock. */
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */

	/*
	 * Deliver the emergency signal?  The signal is sent when the target
	 * is not stopped, or is not enabled for I/O and external
	 * interrupts, or is in (enabled) wait with a nonzero PSW address,
	 * or is running with the given ASN as its primary or secondary ASN
	 * (NOTE(review): clause grouping follows the architected condition
	 * for this order — confirm against the Principles of Operation).
	 */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		/* Condition not met: store "incorrect state" status. */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}
  109. static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
  110. {
  111. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  112. struct kvm_s390_local_interrupt *li;
  113. struct kvm_s390_interrupt_info *inti;
  114. int rc;
  115. if (cpu_addr >= KVM_MAX_VCPUS)
  116. return SIGP_CC_NOT_OPERATIONAL;
  117. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  118. if (!inti)
  119. return -ENOMEM;
  120. inti->type = KVM_S390_INT_EXTERNAL_CALL;
  121. inti->extcall.code = vcpu->vcpu_id;
  122. spin_lock(&fi->lock);
  123. li = fi->local_int[cpu_addr];
  124. if (li == NULL) {
  125. rc = SIGP_CC_NOT_OPERATIONAL;
  126. kfree(inti);
  127. goto unlock;
  128. }
  129. spin_lock_bh(&li->lock);
  130. list_add_tail(&inti->list, &li->list);
  131. atomic_set(&li->active, 1);
  132. atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
  133. if (waitqueue_active(li->wq))
  134. wake_up_interruptible(li->wq);
  135. spin_unlock_bh(&li->lock);
  136. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  137. VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
  138. unlock:
  139. spin_unlock(&fi->lock);
  140. return rc;
  141. }
/*
 * Queue a SIGP stop interrupt on @li and record the requested @action
 * bits (ACTION_STOP_ON_STOP and/or ACTION_STORE_ON_STOP).
 *
 * Returns SIGP_CC_ORDER_CODE_ACCEPTED on success, -ENOMEM if the
 * interrupt info cannot be allocated, or -ESHUTDOWN when the CPU is
 * already stopped but a store-on-stop was requested — the caller must
 * then store the status itself (see __sigp_stop()).
 */
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	/* GFP_ATOMIC: called with fi->lock held (see __sigp_stop). */
	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		/* Already stopped: nothing to inject. */
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	/* Remember what to do once the target actually stops. */
	li->action_bits |= action;
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);
	return rc;
}
  167. static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
  168. {
  169. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  170. struct kvm_s390_local_interrupt *li;
  171. int rc;
  172. if (cpu_addr >= KVM_MAX_VCPUS)
  173. return SIGP_CC_NOT_OPERATIONAL;
  174. spin_lock(&fi->lock);
  175. li = fi->local_int[cpu_addr];
  176. if (li == NULL) {
  177. rc = SIGP_CC_NOT_OPERATIONAL;
  178. goto unlock;
  179. }
  180. rc = __inject_sigp_stop(li, action);
  181. unlock:
  182. spin_unlock(&fi->lock);
  183. VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
  184. if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
  185. /* If the CPU has already been stopped, we still have
  186. * to save the status when doing stop-and-store. This
  187. * has to be done after unlocking all spinlocks. */
  188. struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
  189. rc = kvm_s390_store_status_unloaded(dst_vcpu,
  190. KVM_S390_STORE_STATUS_NOADDR);
  191. }
  192. return rc;
  193. }
  194. static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
  195. {
  196. int rc;
  197. switch (parameter & 0xff) {
  198. case 0:
  199. rc = SIGP_CC_NOT_OPERATIONAL;
  200. break;
  201. case 1:
  202. case 2:
  203. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  204. break;
  205. default:
  206. rc = -EOPNOTSUPP;
  207. }
  208. return rc;
  209. }
/*
 * SIGP SET PREFIX: queue a set-prefix interrupt that will change the
 * target CPU's prefix register to @address.
 *
 * @vcpu:     sending vcpu
 * @cpu_addr: address of the destination vcpu
 * @address:  new prefix value (only bits selecting the 8K area are kept)
 * @reg:      caller's register receiving the status bits on CC 1
 *
 * The target must exist and be in the stopped state, and the new prefix
 * area (two pages) must be backed by accessible guest memory.
 *
 * Returns a SIGP condition code; SIGP_CC_BUSY on allocation failure.
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	/* Probe both pages of the would-be prefix area. */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		/* No such vcpu: report incorrect state to the caller. */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}
  262. static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
  263. u32 addr, u64 *reg)
  264. {
  265. struct kvm_vcpu *dst_vcpu = NULL;
  266. int flags;
  267. int rc;
  268. if (cpu_id < KVM_MAX_VCPUS)
  269. dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
  270. if (!dst_vcpu)
  271. return SIGP_CC_NOT_OPERATIONAL;
  272. spin_lock_bh(&dst_vcpu->arch.local_int.lock);
  273. flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
  274. spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
  275. if (!(flags & CPUSTAT_STOPPED)) {
  276. *reg &= 0xffffffff00000000UL;
  277. *reg |= SIGP_STATUS_INCORRECT_STATE;
  278. return SIGP_CC_STATUS_STORED;
  279. }
  280. addr &= 0x7ffffe00;
  281. rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
  282. if (rc == -EFAULT) {
  283. *reg &= 0xffffffff00000000UL;
  284. *reg |= SIGP_STATUS_INVALID_PARAMETER;
  285. rc = SIGP_CC_STATUS_STORED;
  286. }
  287. return rc;
  288. }
  289. static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
  290. u64 *reg)
  291. {
  292. int rc;
  293. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  294. if (cpu_addr >= KVM_MAX_VCPUS)
  295. return SIGP_CC_NOT_OPERATIONAL;
  296. spin_lock(&fi->lock);
  297. if (fi->local_int[cpu_addr] == NULL)
  298. rc = SIGP_CC_NOT_OPERATIONAL;
  299. else {
  300. if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
  301. & CPUSTAT_RUNNING) {
  302. /* running */
  303. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  304. } else {
  305. /* not running */
  306. *reg &= 0xffffffff00000000UL;
  307. *reg |= SIGP_STATUS_NOT_RUNNING;
  308. rc = SIGP_CC_STATUS_STORED;
  309. }
  310. }
  311. spin_unlock(&fi->lock);
  312. VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
  313. rc);
  314. return rc;
  315. }
  316. /* Test whether the destination CPU is available and not busy */
  317. static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
  318. {
  319. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  320. struct kvm_s390_local_interrupt *li;
  321. int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  322. if (cpu_addr >= KVM_MAX_VCPUS)
  323. return SIGP_CC_NOT_OPERATIONAL;
  324. spin_lock(&fi->lock);
  325. li = fi->local_int[cpu_addr];
  326. if (li == NULL) {
  327. rc = SIGP_CC_NOT_OPERATIONAL;
  328. goto out;
  329. }
  330. spin_lock_bh(&li->lock);
  331. if (li->action_bits & ACTION_STOP_ON_STOP)
  332. rc = SIGP_CC_BUSY;
  333. spin_unlock_bh(&li->lock);
  334. out:
  335. spin_unlock(&fi->lock);
  336. return rc;
  337. }
/*
 * Intercept handler for the SIGNAL PROCESSOR instruction.
 *
 * Decodes the r1/r3 fields from the intercepted instruction, extracts
 * the order code, destination CPU address and parameter, dispatches to
 * the per-order helper, and finally sets the guest's condition code.
 *
 * Returns 0 on success (condition code set in the guest PSW), a
 * negative error code to let user space handle the order, or the
 * result of injecting a privileged-operation program interrupt when
 * the guest issued SIGP from the problem state.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	/* destination CPU address: low 16 bits of gpr r3 */
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	/* parameter is taken from the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		/* counted under the plain stop statistic as well */
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* Non-negative rc is a SIGP condition code for the guest. */
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}