diag.c

/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"
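
/*
 * Diagnose 0x10: release pages. The guest declares the given range of
 * guest real addresses unused, so the backing is dropped via
 * gmap_discard(); the two lowcore pages at the guest prefix are never
 * discarded.
 */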
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;

	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
	vcpu->stat.diagnose_10++;

	/* we checked for start > end above */
	if (end < prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(start, end, vcpu->arch.gmap);
	} else {
		if (start < prefix)
			gmap_discard(start, prefix, vcpu->arch.gmap);
		if (end >= prefix)
			gmap_discard(prefix + 2 * PAGE_SIZE,
				     end, vcpu->arch.gmap);
	}
	return 0;
}
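
/*
 * Diagnose 0x258: page-reference services. Subcode 0 (TOKEN) establishes
 * the pfault handshake token, subcode 1 (CANCEL) tears it down again;
 * the return codes in ry follow SC24-6084.
 */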
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
	unsigned long hva_token = KVM_HVA_ERR_BAD;

	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &parm, vcpu->run->s.regs.gprs[rx],
			    sizeof(parm)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed. We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		hva_token = gfn_to_hva(vcpu->kvm, gpa_to_gfn(parm.token_addr));
		if (kvm_is_error_hva(hva_token))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * Specification allows to let already pending tokens survive
		 * the cancel, therefore to reduce code complexity, we assume
		 * all outstanding tokens are already pending.
		 */
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If the pfault handling was not established or is already
		 * canceled SC24-6084 requests to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}
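
/*
 * Diagnose 0x44: voluntary time slice end. Mapped to kvm_vcpu_on_spin()
 * so the host can schedule another vcpu.
 */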
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	kvm_vcpu_on_spin(vcpu);
	return 0;
}
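
/*
 * Diagnose 0x9c: directed yield. The guest donates the remainder of its
 * time slice to the vcpu whose id is in the register addressed by the
 * instruction.
 */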
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tcpu;
	int tid;
	int i;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.diagnose_9c++;
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

	if (tid == vcpu->vcpu_id)
		return 0;

	kvm_for_each_vcpu(i, tcpu, kvm)
		if (tcpu->vcpu_id == tid) {
			kvm_vcpu_yield_to(tcpu);
			break;
		}
	return 0;
}
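
/*
 * Diagnose 0x308: ipl functions. Subcodes 3 and 4 set up the requested
 * reset flags and return -EREMOTE so the reset is completed in userspace
 * (KVM_EXIT_S390_RESET); other subcodes are passed back as -EOPNOTSUPP.
 */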
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
	switch (subcode) {
	case 0:
	case 1:
		page_table_reset_pgste(current->mm, 0, TASK_SIZE);
		return -EOPNOTSUPP;
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		page_table_reset_pgste(current->mm, 0, TASK_SIZE);
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		page_table_reset_pgste(current->mm, 0, TASK_SIZE);
		break;
	default:
		return -EOPNOTSUPP;
	}

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}
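
/*
 * Diagnose 0x500: virtio-ccw notification hypercall. Forwards the guest's
 * queue notification (subchannel id in gpr 2, virtqueue index in gpr 3)
 * to the KVM_VIRTIO_CCW_NOTIFY_BUS io bus.
 */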
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}
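
/*
 * Entry point for all diagnose intercepts: diagnose is privileged, so
 * problem-state guests get a privileged-operation exception; otherwise
 * dispatch on the diagnose function code.
 */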
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, code);
	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}