gaccess.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187
  1. /*
  2. * guest access functions
  3. *
  4. * Copyright IBM Corp. 2014
  5. *
  6. */
  7. #include <linux/vmalloc.h>
  8. #include <linux/err.h>
  9. #include <asm/pgtable.h>
  10. #include <asm/gmap.h>
  11. #include "kvm-s390.h"
  12. #include "gaccess.h"
  13. #include <asm/switch_to.h>
/* Address-space-control element: origin and type of the top-level DAT table. */
union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

/* Values of the asce designation-type (dt) field. */
enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
/* Region-first-table entry: designates a region-second table. */
union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};

/* Region-second-table entry: designates a region-third table. */
union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};
/* Region-third-table entry, fc=0 format: designates a segment table. */
struct region3_table_entry_fc0 {
	unsigned long sto: 52;/* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};

/* Region-third-table entry, fc=1 format: maps a whole region frame. */
struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	 : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

/* Region-third-table entry; the fc bit selects between the two formats. */
union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
/* Segment-table entry, fc=0 format: designates a page table. */
struct segment_entry_fc0 {
	unsigned long pto: 53;/* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 3;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

/* Segment-table entry, fc=1 format: maps a whole segment frame. */
struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	 : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

/* Segment-table entry; the fc bit selects between the two formats. */
union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
/* Values of the table-type (tt) field within region/segment table entries. */
enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};

/* Page-table entry: final level of the DAT table walk. */
union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z  : 1;	 /* Zero Bit */
		unsigned long i  : 1;	 /* Page-Invalid Bit */
		unsigned long p  : 1;	 /* DAT-Protection Bit */
		unsigned long co : 1;	 /* Change-Recording Override */
		unsigned long	 : 8;
	};
};
/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		/*
		 * The *01 members are the two most significant bits of the
		 * corresponding index; they are compared against the table
		 * length/offset fields during the table walk.
		 */
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
/* Access-list-entry token, as held in an access register. */
union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p : 1;	/* selects CR5 vs CR2 as access-list source */
		u32 alesn : 8;	/* sequence number, must match the ALE */
		u32 alen : 16;	/* index into the access list */
	};
};

/* Access-list designation, read from the area designated by CR2/CR5. */
union ald {
	u32 val;
	struct {
		u32	: 1;
		u32 alo : 24;	/* access-list origin */
		u32 all : 7;	/* access-list length */
	};
};

/* Access-list entry. */
struct ale {
	unsigned long i      : 1;  /* ALEN-Invalid Bit */
	unsigned long	     : 5;
	unsigned long fo     : 1;  /* Fetch-Only Bit */
	unsigned long p      : 1;  /* Private Bit */
	unsigned long alesn  : 8;  /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long	     : 32;
	unsigned long	     : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long	     : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
} __packed;

/* ASN-second-table entry (only the leading fields are used here). */
struct aste {
	unsigned long i      : 1;  /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long	     : 1;
	unsigned long b      : 1;  /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long	     : 2;
	unsigned long ca     : 1;  /* Controlled-ASN Bit */
	unsigned long ra     : 1;  /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
} __packed;
  241. int ipte_lock_held(struct kvm_vcpu *vcpu)
  242. {
  243. if (vcpu->arch.sie_block->eca & 1) {
  244. int rc;
  245. read_lock(&vcpu->kvm->arch.sca_lock);
  246. rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
  247. read_unlock(&vcpu->kvm->arch.sca_lock);
  248. return rc;
  249. }
  250. return vcpu->kvm->arch.ipte_lock_count != 0;
  251. }
/*
 * Take the IPTE lock using the per-VM software counter (no SIIF).
 * The first locker also sets the "k" bit in the ipte control block,
 * spinning until it is free.
 */
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	if (vcpu->kvm->arch.ipte_lock_count > 1)
		goto out;	/* lock already held by this VM, just count */
retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			/* "k" is set by someone else - back off and retry */
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
/*
 * Drop the software-counted IPTE lock (no SIIF).  The last unlocker
 * clears the "k" bit and wakes up anybody waiting on the ipte_wq.
 */
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	if (vcpu->kvm->arch.ipte_lock_count)
		goto out;	/* still held elsewhere in this VM */
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
/*
 * Take the IPTE lock via the ipte control block (SIIF available):
 * spin while "kg" is set, then set "k" and bump the holder count "kh".
 */
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			/* contended - drop the lock, reschedule, retry */
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
/*
 * Drop one hold of the SIIF-style IPTE lock: decrement "kh" and, when
 * this was the last holder, clear "k" and wake up waiters.
 */
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}
  330. void ipte_lock(struct kvm_vcpu *vcpu)
  331. {
  332. if (vcpu->arch.sie_block->eca & 1)
  333. ipte_lock_siif(vcpu);
  334. else
  335. ipte_lock_simple(vcpu);
  336. }
  337. void ipte_unlock(struct kvm_vcpu *vcpu)
  338. {
  339. if (vcpu->arch.sie_block->eca & 1)
  340. ipte_unlock_siif(vcpu);
  341. else
  342. ipte_unlock_simple(vcpu);
  343. }
/*
 * ar_translation - resolve the ASCE designated by access register @ar.
 *
 * Returns 0 on success (with *@asce filled in), a positive PGM_*
 * program interruption code on a translation exception, or a negative
 * error code if reading guest memory failed.
 */
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	/* make sure the guest's access registers are current */
	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		/* AR 0 / ALET 0: primary address space (CR1) */
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		/* ALET 1: secondary address space (CR7) */
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	/* the p bit selects which control register holds the ALD source */
	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	/* reject entries whose effective address would overflow 31 bits */
	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		/* private entry: check the extended authorization index */
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			/* test the secondary-authority bit for this eax */
			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}
/* Bit layout of pgm->trans_exc_code as filled in by trans_exc(). */
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2;	 /* Access Exception Fetch/Store Indication */
	unsigned long	   : 6;
	unsigned long b60  : 1;
	unsigned long b61  : 1;
	unsigned long as   : 2;	 /* ASCE Identifier */
};

/* Values of the fsi field above. */
enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2	 /* Exception was due to fetch operation */
};

/* Protection-exception flavor; selects which TEC bits trans_exc() sets. */
enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
};
/*
 * trans_exc - set up vcpu->arch.pgm for program interruption @code
 * @code: program interruption code (PGM_*)
 * @gva:  guest virtual address involved in the exception
 * @ar:   access register number, stored as exc_access_id where defined
 * @mode: the failed access mode, encoded into the fetch/store indication
 * @prot: protection flavor, only relevant for PGM_PROTECTION
 *
 * Returns @code, so callers can simply "return trans_exc(...)".
 */
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller. We can always store
		 * exc_access_id, as it is undefined for non-ar cases.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		/* FALL THROUGH */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		pgm->exc_access_id = ar;
		break;
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			/* FALL THROUGH */
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			tec->addr = gva >> PAGE_SHIFT;
			tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
			tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
			/* exc_access_id is undefined for most cases */
			pgm->exc_access_id = ar;
			break;
		default: /* LA and KEYC set b61 to 0, other params undefined */
			break;
		}
		break;
	}
	return code;
}
  485. static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
  486. unsigned long ga, ar_t ar, enum gacc_mode mode)
  487. {
  488. int rc;
  489. struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
  490. if (!psw.t) {
  491. asce->val = 0;
  492. asce->r = 1;
  493. return 0;
  494. }
  495. if (mode == GACC_IFETCH)
  496. psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;
  497. switch (psw.as) {
  498. case PSW_AS_PRIMARY:
  499. asce->val = vcpu->arch.sie_block->gcr[1];
  500. return 0;
  501. case PSW_AS_SECONDARY:
  502. asce->val = vcpu->arch.sie_block->gcr[7];
  503. return 0;
  504. case PSW_AS_HOME:
  505. asce->val = vcpu->arch.sie_block->gcr[13];
  506. return 0;
  507. case PSW_AS_ACCREG:
  508. rc = ar_translation(vcpu, asce, ar, mode);
  509. if (rc > 0)
  510. return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
  511. return rc;
  512. }
  513. return 0;
  514. }
  515. static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  516. {
  517. return kvm_read_guest(kvm, gpa, val, sizeof(*val));
  518. }
  519. /**
  520. * guest_translate - translate a guest virtual into a guest absolute address
  521. * @vcpu: virtual cpu
  522. * @gva: guest virtual address
  523. * @gpa: points to where guest physical (absolute) address should be stored
  524. * @asce: effective asce
  525. * @mode: indicates the access mode to be used
  526. *
  527. * Translate a guest virtual address into a guest absolute address by means
  528. * of dynamic address translation as specified by the architecture.
  529. * If the resulting absolute address is not available in the configuration
  530. * an addressing exception is indicated and @gpa will not be changed.
  531. *
  532. * Returns: - zero on success; @gpa contains the resulting absolute address
  533. * - a negative value if guest access failed due to e.g. broken
  534. * guest mapping
  535. * - a positve value if an access exception happened. In this case
  536. * the returned value is the program interruption code as defined
  537. * by the architecture
  538. */
  539. static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
  540. unsigned long *gpa, const union asce asce,
  541. enum gacc_mode mode)
  542. {
  543. union vaddress vaddr = {.addr = gva};
  544. union raddress raddr = {.addr = gva};
  545. union page_table_entry pte;
  546. int dat_protection = 0;
  547. union ctlreg0 ctlreg0;
  548. unsigned long ptr;
  549. int edat1, edat2;
  550. ctlreg0.val = vcpu->arch.sie_block->gcr[0];
  551. edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
  552. edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
  553. if (asce.r)
  554. goto real_address;
  555. ptr = asce.origin * 4096;
  556. switch (asce.dt) {
  557. case ASCE_TYPE_REGION1:
  558. if (vaddr.rfx01 > asce.tl)
  559. return PGM_REGION_FIRST_TRANS;
  560. ptr += vaddr.rfx * 8;
  561. break;
  562. case ASCE_TYPE_REGION2:
  563. if (vaddr.rfx)
  564. return PGM_ASCE_TYPE;
  565. if (vaddr.rsx01 > asce.tl)
  566. return PGM_REGION_SECOND_TRANS;
  567. ptr += vaddr.rsx * 8;
  568. break;
  569. case ASCE_TYPE_REGION3:
  570. if (vaddr.rfx || vaddr.rsx)
  571. return PGM_ASCE_TYPE;
  572. if (vaddr.rtx01 > asce.tl)
  573. return PGM_REGION_THIRD_TRANS;
  574. ptr += vaddr.rtx * 8;
  575. break;
  576. case ASCE_TYPE_SEGMENT:
  577. if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
  578. return PGM_ASCE_TYPE;
  579. if (vaddr.sx01 > asce.tl)
  580. return PGM_SEGMENT_TRANSLATION;
  581. ptr += vaddr.sx * 8;
  582. break;
  583. }
  584. switch (asce.dt) {
  585. case ASCE_TYPE_REGION1: {
  586. union region1_table_entry rfte;
  587. if (kvm_is_error_gpa(vcpu->kvm, ptr))
  588. return PGM_ADDRESSING;
  589. if (deref_table(vcpu->kvm, ptr, &rfte.val))
  590. return -EFAULT;
  591. if (rfte.i)
  592. return PGM_REGION_FIRST_TRANS;
  593. if (rfte.tt != TABLE_TYPE_REGION1)
  594. return PGM_TRANSLATION_SPEC;
  595. if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
  596. return PGM_REGION_SECOND_TRANS;
  597. if (edat1)
  598. dat_protection |= rfte.p;
  599. ptr = rfte.rto * 4096 + vaddr.rsx * 8;
  600. }
  601. /* fallthrough */
  602. case ASCE_TYPE_REGION2: {
  603. union region2_table_entry rste;
  604. if (kvm_is_error_gpa(vcpu->kvm, ptr))
  605. return PGM_ADDRESSING;
  606. if (deref_table(vcpu->kvm, ptr, &rste.val))
  607. return -EFAULT;
  608. if (rste.i)
  609. return PGM_REGION_SECOND_TRANS;
  610. if (rste.tt != TABLE_TYPE_REGION2)
  611. return PGM_TRANSLATION_SPEC;
  612. if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
  613. return PGM_REGION_THIRD_TRANS;
  614. if (edat1)
  615. dat_protection |= rste.p;
  616. ptr = rste.rto * 4096 + vaddr.rtx * 8;
  617. }
  618. /* fallthrough */
  619. case ASCE_TYPE_REGION3: {
  620. union region3_table_entry rtte;
  621. if (kvm_is_error_gpa(vcpu->kvm, ptr))
  622. return PGM_ADDRESSING;
  623. if (deref_table(vcpu->kvm, ptr, &rtte.val))
  624. return -EFAULT;
  625. if (rtte.i)
  626. return PGM_REGION_THIRD_TRANS;
  627. if (rtte.tt != TABLE_TYPE_REGION3)
  628. return PGM_TRANSLATION_SPEC;
  629. if (rtte.cr && asce.p && edat2)
  630. return PGM_TRANSLATION_SPEC;
  631. if (rtte.fc && edat2) {
  632. dat_protection |= rtte.fc1.p;
  633. raddr.rfaa = rtte.fc1.rfaa;
  634. goto absolute_address;
  635. }
  636. if (vaddr.sx01 < rtte.fc0.tf)
  637. return PGM_SEGMENT_TRANSLATION;
  638. if (vaddr.sx01 > rtte.fc0.tl)
  639. return PGM_SEGMENT_TRANSLATION;
  640. if (edat1)
  641. dat_protection |= rtte.fc0.p;
  642. ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
  643. }
  644. /* fallthrough */
  645. case ASCE_TYPE_SEGMENT: {
  646. union segment_table_entry ste;
  647. if (kvm_is_error_gpa(vcpu->kvm, ptr))
  648. return PGM_ADDRESSING;
  649. if (deref_table(vcpu->kvm, ptr, &ste.val))
  650. return -EFAULT;
  651. if (ste.i)
  652. return PGM_SEGMENT_TRANSLATION;
  653. if (ste.tt != TABLE_TYPE_SEGMENT)
  654. return PGM_TRANSLATION_SPEC;
  655. if (ste.cs && asce.p)
  656. return PGM_TRANSLATION_SPEC;
  657. if (ste.fc && edat1) {
  658. dat_protection |= ste.fc1.p;
  659. raddr.sfaa = ste.fc1.sfaa;
  660. goto absolute_address;
  661. }
  662. dat_protection |= ste.fc0.p;
  663. ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
  664. }
  665. }
  666. if (kvm_is_error_gpa(vcpu->kvm, ptr))
  667. return PGM_ADDRESSING;
  668. if (deref_table(vcpu->kvm, ptr, &pte.val))
  669. return -EFAULT;
  670. if (pte.i)
  671. return PGM_PAGE_TRANSLATION;
  672. if (pte.z)
  673. return PGM_TRANSLATION_SPEC;
  674. if (pte.co && !edat1)
  675. return PGM_TRANSLATION_SPEC;
  676. dat_protection |= pte.p;
  677. raddr.pfra = pte.pfra;
  678. real_address:
  679. raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
  680. absolute_address:
  681. if (mode == GACC_STORE && dat_protection)
  682. return PGM_PROTECTION;
  683. if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
  684. return PGM_ADDRESSING;
  685. *gpa = raddr.addr;
  686. return 0;
  687. }
  688. static inline int is_low_address(unsigned long ga)
  689. {
  690. /* Check for address ranges 0..511 and 4096..4607 */
  691. return (ga & ~0x11fful) == 0;
  692. }
  693. static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
  694. const union asce asce)
  695. {
  696. union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
  697. psw_t *psw = &vcpu->arch.sie_block->gpsw;
  698. if (!ctlreg0.lap)
  699. return 0;
  700. if (psw_bits(*psw).t && asce.p)
  701. return 0;
  702. return 1;
  703. }
/*
 * guest_page_range - translate a run of guest logical pages.
 * @pages: output array; receives one guest absolute address per page
 * @nr_pages: number of pages to translate, starting at @ga
 *
 * Returns 0 on success, a negative error for a broken guest mapping,
 * or the result of trans_exc() when an access exception is injected.
 */
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	int lap_enabled, rc = 0;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		/* low-address protection only fences stores */
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		ga &= PAGE_MASK;
		if (psw_bits(*psw).t) {
			/* DAT on: full table walk */
			rc = guest_translate(vcpu, ga, pages, asce, mode);
			if (rc < 0)
				return rc;
		} else {
			/* DAT off: apply prefixing only */
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				rc = PGM_ADDRESSING;
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_DAT);
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}
/*
 * access_guest - copy @len bytes between @data and guest logical @ga.
 * @ar: access register number for access-register mode
 * @mode: GACC_STORE writes to the guest, otherwise the guest is read
 *
 * All pages of the range are translated first, so an untranslatable
 * page faults before any data is copied.  The ipte lock is taken while
 * DAT is on and the asce is not a real-space designation.
 */
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		 unsigned long len, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	pages = pages_array;
	/* fall back to a heap array for accesses crossing > 2 pages */
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(nr_pages * sizeof(unsigned long));
	if (!pages)
		return -ENOMEM;
	need_ipte_lock = psw_bits(*psw).t && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}
  777. int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  778. void *data, unsigned long len, enum gacc_mode mode)
  779. {
  780. unsigned long _len, gpa;
  781. int rc = 0;
  782. while (len && !rc) {
  783. gpa = kvm_s390_real_to_abs(vcpu, gra);
  784. _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
  785. if (mode)
  786. rc = write_guest_abs(vcpu, gpa, data, _len);
  787. else
  788. rc = read_guest_abs(vcpu, gpa, data, _len);
  789. len -= _len;
  790. gra += _len;
  791. data += _len;
  792. }
  793. return rc;
  794. }
  795. /**
  796. * guest_translate_address - translate guest logical into guest absolute address
  797. *
  798. * Parameter semantics are the same as the ones from guest_translate.
  799. * The memory contents at the guest address are not changed.
  800. *
  801. * Note: The IPTE lock is not taken during this function, so the caller
  802. * has to take care of this.
  803. */
  804. int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
  805. unsigned long *gpa, enum gacc_mode mode)
  806. {
  807. psw_t *psw = &vcpu->arch.sie_block->gpsw;
  808. union asce asce;
  809. int rc;
  810. gva = kvm_s390_logical_to_effective(vcpu, gva);
  811. rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
  812. if (rc)
  813. return rc;
  814. if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
  815. if (mode == GACC_STORE)
  816. return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
  817. mode, PROT_TYPE_LA);
  818. }
  819. if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
  820. rc = guest_translate(vcpu, gva, gpa, asce, mode);
  821. if (rc > 0)
  822. return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
  823. } else {
  824. *gpa = kvm_s390_real_to_abs(vcpu, gva);
  825. if (kvm_is_error_gpa(vcpu->kvm, *gpa))
  826. return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
  827. }
  828. return rc;
  829. }
  830. /**
  831. * check_gva_range - test a range of guest virtual addresses for accessibility
  832. */
  833. int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
  834. unsigned long length, enum gacc_mode mode)
  835. {
  836. unsigned long gpa;
  837. unsigned long currlen;
  838. int rc = 0;
  839. ipte_lock(vcpu);
  840. while (length > 0 && !rc) {
  841. currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
  842. rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
  843. gva += currlen;
  844. length -= currlen;
  845. }
  846. ipte_unlock(vcpu);
  847. return rc;
  848. }
  849. /**
  850. * kvm_s390_check_low_addr_prot_real - check for low-address protection
  851. * @gra: Guest real address
  852. *
  853. * Checks whether an address is subject to low-address protection and set
  854. * up vcpu->arch.pgm accordingly if necessary.
  855. *
  856. * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
  857. */
  858. int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
  859. {
  860. union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
  861. if (!ctlreg0.lap || !is_low_address(gra))
  862. return 0;
  863. return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
  864. }
/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the page table address result
 * @dat_protection: set to 1 if any protection bit was seen during the walk
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Walks the guest's DAT tables from the top level designated by the ASCE
 * down to (but not including) the page table, shadowing each level via the
 * gmap_shadow_*() helpers.  Returns 0 on success with *pgt set to the
 * parent address of the page table, a positive PGM_* code on a guest
 * translation exception, or a negative error from the gmap helpers.
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	/* top-level table address; origin is in units of 4k */
	ptr = asce.origin * 4096;
	if (asce.r) {
		/*
		 * Real-space designation: no guest tables exist; treat the
		 * whole address space as one "fake" region-first table.
		 */
		*fake = 1;
		asce.dt = ASCE_TYPE_REGION1;
	}
	/* First: reject addresses outside the reach of this ASCE type */
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !asce.r)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}
	/*
	 * Walk the tables top-down.  Each case falls through to the next
	 * lower level; a shorter ASCE type simply enters the switch lower
	 * down.  In fake mode the table entry is synthesized from the
	 * offset inside the contiguous memory block instead of being read
	 * from guest memory.
	 */
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			/* offset in 16EB guest memory block */
			ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
			rfte.val = ptr;
			goto shadow_r2t;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		/* entry-level protection bits only exist with EDAT-1 */
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto << 12UL;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
		/* fallthrough */
	}
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			/* offset in 8PB guest memory block */
			ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
			rste.val = ptr;
			goto shadow_r3t;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto << 12UL;
shadow_r3t:
		/* propagate accumulated protection into the shadow entry */
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
		/* fallthrough */
	}
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			/* offset in 4TB guest memory block */
			ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
			rtte.val = ptr;
			goto shadow_sgt;
		}
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			/* 2GB huge frame: enter fake mode for lower levels */
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa << 31UL;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto << 12UL;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
		/* fallthrough */
	}
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			/* offset in 2G guest memory block */
			ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
			ste.val = ptr;
			goto shadow_pgt;
		}
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			/* 1MB large page: enter fake mode for the pgt level */
			*fake = 1;
			ptr = ste.fc1.sfaa << 20UL;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto << 11UL;
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}
/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 *
 * Resolves a fault by looking up (or building via kvm_s390_shadow_tables)
 * the shadowed page table for @saddr, reading the guest's pte and mapping
 * the page into the shadow gmap.
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 * - > 0 (pgm exception code) on exceptions while faulting
 * - -EAGAIN if the caller can retry immediately
 * - -EFAULT when accessing invalid guest addresses
 * - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt;
	int dat_protection, fake;
	int rc;

	down_read(&sg->mm->mmap_sem);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu);

	/* fast path: the pgt may already be shadowed from a previous fault */
	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		/* offset in 1MB guest memory block */
		pte.val = pgt + ((unsigned long) vaddr.px << 12UL);
		/* no real pgt to read; pte was synthesized above */
		goto shadow_page;
	}
	if (!rc)
		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	/* force write protection if any upper table level was protected */
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu);
	up_read(&sg->mm->mmap_sem);
	return rc;
}