// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include "kvm-s390.h"
#include "gaccess.h"

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * the backup info for machine check. ensure it's at
	 * the same offset as that in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally, should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	/* calculated guest addresses of satellite control blocks */
	gpa_t sca_gpa;				/* 0x0230 */
	gpa_t itdba_gpa;			/* 0x0238 */
	gpa_t gvrd_gpa;				/* 0x0240 */
	gpa_t riccbd_gpa;			/* 0x0248 */
	gpa_t sdnx_gpa;				/* 0x0250 */
	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};

/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}
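
/*
 * Example (illustrative): for reason_code 0x0037 ("prefix could not be
 * shadowed"), the guest-2 visible scb ends up with ipa == 0x1000,
 * ipb == 0x00370000 and icptcode == ICPT_VALIDITY. The return value of 1
 * follows the convention used throughout this file: > 0 means "give
 * control back to guest 2".
 */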

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}
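
/*
 * Note on the handshake above: PROG_REQUEST in prog20 prevents (re)entering
 * SIE with this scb, while PROG_IN_SIE in prog0c indicates the scb is
 * currently executing. prefix_unmapped_sync() therefore first blocks new
 * entries, then requests a STOP interrupt to kick a running vSIE, and spins
 * until the hardware has left SIE.
 */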

/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}
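
/*
 * Example (illustrative): a guest-3 scb that sets both CPUSTAT_GED and
 * CPUSTAT_GED2 is rejected with validity reason 0x0001 above, as the two
 * generations of guest-external-detection are mutually exclusive. Flags
 * such as CPUSTAT_P or CPUSTAT_KSS only pass through when the corresponding
 * CPU feature (GPERE, KSS, ...) is available to guest 2.
 */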

/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
			unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
{
	struct kvm_s390_apcb0 tmp;

	if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;

	return 0;
}

/**
 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original apcb in the guest2
 * @apcb_h: pointer to start of apcb in the guest1
 *
 * Returns 0 on success; -EFAULT on error reading the guest apcb
 */
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o, unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	/* bitmap_and() takes a length in bits, not bytes */
	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));

	return 0;
}

/**
 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original guest apcb
 * @apcb_h: pointer to start of apcb in the host
 *
 * Returns 0 on success; -EFAULT on error reading the guest apcb
 */
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o,
			unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb1)))
		return -EFAULT;

	/* bitmap_and() takes a length in bits, not bytes */
	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));

	return 0;
}

/**
 * setup_apcb - Create a shadow copy of the apcb.
 * @vcpu: pointer to the virtual CPU
 * @crycb_s: pointer to shadow crycb
 * @crycb_o: pointer to original guest crycb
 * @crycb_h: pointer to the host crycb
 * @fmt_o: format of the original guest crycb.
 * @fmt_h: format of the host crycb.
 *
 * Checks the compatibility between the guest and host crycb and calls the
 * appropriate copy function.
 *
 * Returns 0 on success, or an error number if the guest and host crycb
 * are incompatible.
 */
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
		      const u32 crycb_o,
		      struct kvm_s390_crypto_cb *crycb_h,
		      int fmt_o, int fmt_h)
{
	struct kvm_s390_crypto_cb *crycb;

	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;

	switch (fmt_o) {
	case CRYCB_FORMAT2:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
			return -EACCES;
		if (fmt_h != CRYCB_FORMAT2)
			return -EINVAL;
		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
				    (unsigned long) &crycb->apcb1,
				    (unsigned long *)&crycb_h->apcb1);
	case CRYCB_FORMAT1:
		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    (unsigned long) &crycb->apcb0,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    (unsigned long) &crycb->apcb0,
					    (unsigned long *) &crycb_h->apcb0);
		}
		break;
	case CRYCB_FORMAT0:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
			return -EACCES;
		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    (unsigned long) &crycb->apcb0,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
		case CRYCB_FORMAT0:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    (unsigned long) &crycb->apcb0,
					    (unsigned long *) &crycb_h->apcb0);
		}
	}
	return -EINVAL;
}
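
/*
 * Summary of the format handling above (guest format vs. host format):
 *
 *   guest \ host | FORMAT0       FORMAT1       FORMAT2
 *   -------------+--------------------------------------------
 *   FORMAT0      | setup_apcb00  setup_apcb00  setup_apcb10
 *   FORMAT1      | -EINVAL       setup_apcb00  setup_apcb10
 *   FORMAT2      | -EINVAL       -EINVAL       setup_apcb11
 *
 * FORMAT0/FORMAT2 guest apcbs must additionally not cross a page boundary
 * (32 resp. 256 bytes are read from guest real storage), otherwise -EACCES.
 */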

/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.
 * There is nothing to do for format-0.
 *
 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;
	int apie_h;
	int key_msk = test_kvm_facility(vcpu->kvm, 76);
	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
	int ret = 0;

	scb_s->crycbd = 0;

	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
	if (!apie_h && !key_msk)
		return 0;

	if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	if (fmt_o == CRYCB_FORMAT1)
		if ((crycb_addr & PAGE_MASK) !=
		    ((crycb_addr + 128) & PAGE_MASK))
			return set_validity_icpt(scb_s, 0x003CU);

	if (apie_h && (scb_o->eca & ECA_APIE)) {
		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
				 vcpu->kvm->arch.crypto.crycb,
				 fmt_o, fmt_h);
		if (ret)
			goto end;
		scb_s->eca |= scb_o->eca & ECA_APIE;
	}

	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	if (!ecb3_flags)
		goto end;

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72,
			    vsie_page->crycb.dea_wrapping_key_mask, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
			vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);

end:
	switch (ret) {
	case -EINVAL:
		return set_validity_icpt(scb_s, 0x0020U);
	case -EFAULT:
		return set_validity_icpt(scb_s, 0x0035U);
	case -EACCES:
		return set_validity_icpt(scb_s, 0x003CU);
	}
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
	return 0;
}
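
/*
 * Note on crycbd: the low 3 bits of the crycb designation hold the format,
 * the remaining bits the 31-bit crycb origin (hence the 0x7ffffff8 mask
 * above). The shadow crycbd always advertises CRYCB_FORMAT2 and points into
 * the vsie_page, so the firmware never dereferences a guest-3 address here.
 */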

/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}
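
/*
 * Example (hypothetical values): with a machine minimum of min_ibc = 0x0101
 * and a guest-2 model limit of 0x0a03, a guest-3 request of 0x0010 is raised
 * to 0x0101, a request of 0x0b00 is capped to 0x0a03, and anything in
 * between is used unmodified - so SIE never sees an out-of-range ibc.
 */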

/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82)) {
		scb_o->fpf &= ~FPF_BPBC;
		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
	}

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	case ICPT_PARTEXEC:
		/* MVPG only */
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}

/*
 * Setup the shadow scb by copying and checking the relevant parts of the g2
 * provided scb.
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;
	scb_s->fpf = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We have to definitely flush the tlb if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139))
		scb_s->ecd |= scb_o->ecd & ECD_MEF;
	/* etoken */
	if (test_kvm_facility(vcpu->kvm, 156))
		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;

	scb_s->hpid = HPID_VSIE;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}

/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}
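
/*
 * Example (illustrative): with scb_s->prefix == 0x2 and scb_s->mso ==
 * 0x100000, the guest-3 prefix area starts at guest-2 absolute address
 * 0x100000 + (0x2 << GUEST_PREFIX_SHIFT). The first prefix page is always
 * faulted in; the second one only when transactional execution (ECB_TE) is
 * shadowed, since the transaction diagnostic block (offset 0x1800 of the
 * prefix area) lies in it.
 */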

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))
		return -EINVAL;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}

/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}
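
/*
 * Note: despite the hpa_t type, the value stored by pin_guest_page() is a
 * host *virtual* address (page_to_virt() plus the in-page offset). On s390
 * the kernel address space is identity-mapped, so it can be handed to the
 * hardware directly, and kvm_release_pfn_dirty(hpa >> PAGE_SHIFT) relies on
 * the same 1:1 mapping to recover the pfn.
 */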

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
		vsie_page->sca_gpa = 0;
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
		vsie_page->itdba_gpa = 0;
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
		vsie_page->gvrd_gpa = 0;
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
		vsie_page->riccbd_gpa = 0;
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
		vsie_page->sdnx_gpa = 0;
		scb_s->sdnxo = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
		if (gpa < 2 * PAGE_SIZE)
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		vsie_page->sca_gpa = gpa;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		vsie_page->itdba_gpa = gpa;
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes vector registers cannot cross page boundaries
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		vsie_page->gvrd_gpa = gpa;
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		vsie_page->riccbd_gpa = gpa;
		scb_s->riccbd = hpa;
	}

	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
	    (scb_s->ecd & ECD_ETOKENF)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/*
		 * Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		vsie_page->sdnx_gpa = gpa;
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}
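
/*
 * Overview of the satellite blocks forwarded by pin_blocks() (illustrative):
 *
 *   block  | designation mask | size       | gated by
 *   -------+------------------+------------+-----------------------------
 *   sca    | ~0xfUL           | bsca_block | non-zero designation
 *   itdba  | ~0xffUL          | 256 bytes  | ECB_TE
 *   gvrd   | ~0x1ffUL         | 512 bytes  | ECA_VX && !ECD_HOSTREGMGMT
 *   riccbd | ~0x3fUL          | 64 bytes   | ECB3_RI
 *   sdnx   | ~0xfUL           | 2^sdnxc    | ECB_GS (w/o ECD_HOSTREGMGMT)
 *          |                  |            | or ECD_ETOKENF
 *
 * For itdba/gvrd/riccbd the alignment implied by the mask already keeps the
 * block within one page; sca and sdnx page-crossing is checked explicitly.
 */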

/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return 0;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}
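
/*
 * Worked example (illustrative): a failing write to virtual address
 * 0x12345678 yields trans_exc_code = 0x12345000 | (1 << 10) = 0x12345400
 * (bits 52-53 encode the access as a store; a fetch would use 2 << 10).
 * For PGM_PROTECTION, bit 61 (0x4) is set on top of that.
 */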

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}

/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;

	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}
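
/*
 * Note: facility bit 7 is the stfle facility of guest 2. When it is
 * available and guest 3 supplied a facility-list designation (31-bit,
 * 8-byte aligned, hence the 0x7ffffff8 mask), the list is copied into
 * vsie_page->fac and the shadow scb points there, so subsequent STFLE
 * interpretation never touches guest-3 memory. The psw is rewound first so
 * the intercepted instruction is simply retried.
 */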

/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	__releases(vcpu->kvm->srcu)
	__acquires(vcpu->kvm->srcu)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int guest_bp_isolation;
	int rc = 0;

	handle_last_fault(vcpu, vsie_page);

	if (need_resched())
		schedule();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/* save current guest state of bp isolation override */
	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * The guest is running with BPBC, so we have to force it on for our
	 * nested guest. This is done by enabling BPBC globally, so the BPBC
	 * control in the SCB (which the nested guest can modify) is simply
	 * ignored.
	 */
	if (test_kvm_facility(vcpu->kvm, 82) &&
	    vcpu->arch.sie_block->fpf & FPF_BPBC)
		set_thread_flag(TIF_ISOLATE_BP_GUEST);

	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	/*
	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
	 * and VCPU requests also hinder the vSIE from running and lead
	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
	 * also kick the vSIE.
	 */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
	barrier();
	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();

	/* restore guest state for bp isolation override */
	if (!guest_bp_isolation)
		clear_thread_flag(TIF_ISOLATE_BP_GUEST);

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	}
	return rc;
}

static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}
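
/*
 * The edat value computed above ends up as 0, 1 or 2: EDAT1 requires
 * cr0.edat plus facility 8, and edat == 2 additionally requires facility 78
 * (EDAT2). Together with the guest ASCE it fully describes the translation
 * the shadow gmap has to emulate, which is why gmap_shadow_valid() compares
 * exactly this (asce, edat) pair.
 */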

/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;

	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}
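
/*
 * Epoch arithmetic above (illustrative): the guest-3 epoch is relative to
 * guest 2, so the absolute value is scb_s->epoch + kvm->arch.epoch modulo
 * 2^64. With the multiple-epoch facility (ECD_MEF), the epoch extension
 * epdx is summed the same way, and the "scb_s->epoch < vcpu->kvm->arch.epoch"
 * test detects unsigned wraparound of the low 64 bits to carry a 1 into epdx.
 */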

/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
		    kvm_s390_vcpu_sie_inhibited(vcpu))
			break;
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
		/*
		 * Return 1 so the prepared intercept is presented to guest 2
		 * instead of leaking the -EFAULT to userspace.
		 */
		rc = 1;
	}
	return rc;
}

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

	rcu_read_lock();
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}
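
/*
 * Cache mechanics above (illustrative): guest-2 scb addresses are 512-byte
 * aligned, so "addr >> 9" is a collision-free radix tree key. The page
 * refcount doubles as an ownership flag: 1 means "cached, unused", 2 means
 * "cached and in use by one VCPU", so page_ref_inc_return() == 2 acts as an
 * atomic try-lock and the NULL return signals the same scb being SIE'd by
 * two VCPUs at once.
 */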

/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
	    kvm_s390_vcpu_sie_inhibited(vcpu))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}