gmap.c

/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
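
/*
 * Illustrative usage sketch (not part of the original file): create a
 * gmap for a guest whose addresses fit below 4 TB, so gmap_alloc picks
 * a region-2 table, and tear it down again.  The mm pointer and the
 * error handling are assumptions made for the example only.
 *
 *	struct gmap *g;
 *
 *	g = gmap_create(current->mm, (1UL << 42) - 1);
 *	if (!g)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(g);		// drops the reference taken at creation
 */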
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
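
/*
 * Illustrative sketch (not part of the original file): a caller that
 * keeps a gmap alive across a sleeping operation pairs gmap_get() with
 * gmap_put(); "do_something" is a placeholder for the example only.
 *
 *	struct gmap *g = gmap_get(gmap);
 *
 *	do_something(g);
 *	gmap_put(g);	// frees the gmap if this was the last reference
 */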
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
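
/*
 * Illustrative sketch (not part of the original file): the typical
 * enable/disable bracket around guest execution.  "run_guest" stands in
 * for the actual SIE entry path and is an assumption made for the
 * example only.
 *
 *	gmap_enable(g);
 *	run_guest(vcpu);	// code here may consult gmap_get_enabled()
 *	gmap_disable(g);
 */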
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
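
/*
 * Illustrative sketch (not part of the original file): map 16 MB of the
 * host address space at guest address 0 and undo it.  All values are
 * example assumptions; both addresses and the length must be 1 MB
 * segment aligned or -EINVAL is returned.
 *
 *	if (gmap_map_segment(g, host_base, 0x0UL, 16UL << 20))
 *		return -EFAULT;
 *	...
 *	gmap_unmap_segment(g, 0x0UL, 16UL << 20);
 */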
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
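
/*
 * Illustrative sketch (not part of the original file): look up the host
 * address backing a guest address; the error check mirrors the callers
 * in this file.
 *
 *	unsigned long vmaddr = gmap_translate(g, gaddr);
 *
 *	if (IS_ERR_VALUE(vmaddr))
 *		return (int) vmaddr;	// -EFAULT: no mapping exists
 */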
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
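
/*
 * Illustrative sketch (not part of the original file): resolve a write
 * fault reported for a guest address, as a KVM exit handler might do.
 * The surrounding handler and its return convention are assumptions
 * made for the example only.
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		...		// e.g. reflect an addressing exception
 */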
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
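
/*
 * Illustrative sketch (not part of the original file): a consumer
 * registers a callback that runs whenever an armed pte in any gmap of
 * the process is invalidated.  The callback body is an assumption made
 * for the example; the .notifier_call signature follows the invocation
 * in gmap_call_notifier() below.
 *
 *	static void my_pte_notifier(struct gmap *gmap, unsigned long start,
 *				    unsigned long end)
 *	{
 *		...		// e.g. kick the vcpus using this gmap
 *	}
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_pte_notifier,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	...
 *	gmap_unregister_pte_notifier(&my_nb);
 */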
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
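
/*
 * Illustrative breakdown (not part of the original file) of how the
 * guest address is sliced by the walk above, matching the shift/mask
 * pairs in the code:
 *
 *	bits 63..53  region-1 index : (gaddr >> 53) & 0x7ff
 *	bits 52..42  region-2 index : (gaddr >> 42) & 0x7ff
 *	bits 41..31  region-3 index : (gaddr >> 31) & 0x7ff
 *	bits 30..20  segment index  : (gaddr >> 20) & 0x7ff
 *	bits 19..12  page index     : (gaddr >> 12) & 0xff
 */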
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
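
/*
 * Illustrative sketch (not part of the original file): write-protect
 * one guest page and arm the invalidation notifier for it, so a later
 * change to the pte triggers the registered callbacks.
 *
 *	rc = gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 *	if (rc)
 *		return rc;	// -EFAULT: no mapping, -ENOMEM: allocation
 */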
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
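
/*
 * Illustrative sketch (not part of the original file): fetch an
 * 8-byte guest DAT table entry by guest address, as the shadow code
 * might do; the locking follows the "mmap_sem in read" requirement
 * stated above, the alignment mask is an example assumption.
 *
 *	unsigned long entry;
 *
 *	down_read(&g->mm->mmap_sem);
 *	rc = gmap_read_table(g, gaddr & ~7UL, &entry);
 *	up_read(&g->mm->mmap_sem);
 */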
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
  1299. /**
  1300. * gmap_find_shadow - find a specific asce in the list of shadow tables
  1301. * @parent: pointer to the parent gmap
  1302. * @asce: ASCE for which the shadow table is created
  1303. * @edat_level: edat level to be used for the shadow translation
  1304. *
  1305. * Returns the pointer to a gmap if a shadow table with the given asce is
  1306. * already available, ERR_PTR(-EAGAIN) if another one is just being created,
  1307. * otherwise NULL
  1308. */
  1309. static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
  1310. int edat_level)
  1311. {
  1312. struct gmap *sg;
  1313. list_for_each_entry(sg, &parent->children, list) {
  1314. if (sg->orig_asce != asce || sg->edat_level != edat_level ||
  1315. sg->removed)
  1316. continue;
  1317. if (!sg->initialized)
  1318. return ERR_PTR(-EAGAIN);
  1319. atomic_inc(&sg->ref_count);
  1320. return sg;
  1321. }
  1322. return NULL;
  1323. }
  1324. /**
  1325. * gmap_shadow_valid - check if a shadow guest address space matches the
  1326. * given properties and is still valid
  1327. * @sg: pointer to the shadow guest address space structure
  1328. * @asce: ASCE for which the shadow table is requested
  1329. * @edat_level: edat level to be used for the shadow translation
  1330. *
  1331. * Returns 1 if the gmap shadow is still valid and matches the given
  1332. * properties, the caller can continue using it. Returns 0 otherwise, the
  1333. * caller has to request a new shadow gmap in this case.
  1334. *
  1335. */
  1336. int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
  1337. {
  1338. if (sg->removed)
  1339. return 0;
  1340. return sg->orig_asce == asce && sg->edat_level == edat_level;
  1341. }
  1342. EXPORT_SYMBOL_GPL(gmap_shadow_valid);

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred to by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
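
/*
 * Illustrative sketch, not part of the original gmap.c: typical creation
 * flow. ERR_PTR(-EAGAIN) means another CPU is concurrently creating or
 * removing a shadow for the same ASCE; the busy retry shown here is an
 * assumption, a real caller would back out and retry later.
 */
static struct gmap *example_create_shadow(struct gmap *parent,
					  unsigned long asce, int edat_level)
{
	struct gmap *sg;

	do {
		sg = gmap_shadow(parent, asce, edat_level);
	} while (IS_ERR(sg) && PTR_ERR(sg) == -EAGAIN);
	return sg;	/* valid gmap or ERR_PTR(-ENOMEM/-EFAULT) */
}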

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;	/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;		/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;	/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r2t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
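
/*
 * Illustrative sketch, not part of the original gmap.c: how a shadow fault
 * handler would call gmap_shadow_r2t(); gmap_shadow_r3t() and
 * gmap_shadow_sgt() below follow the exact same calling convention, one
 * table level further down. How @r2t and @fake are derived from the
 * parent's table entries is an assumption of this example.
 */
static int example_shadow_r2t(struct gmap *sg, unsigned long saddr,
			      unsigned long r2t, int fake)
{
	int rc;

	/* the real callers already hold sg->mm->mmap_sem in read */
	down_read(&sg->mm->mmap_sem);
	rc = gmap_shadow_r2t(sg, saddr, r2t, fake);
	up_read(&sg->mm->mmap_sem);
	return rc;	/* 0, -EAGAIN, -ENOMEM or -EFAULT */
}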

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;	/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;		/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;	/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r3t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;	/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;		/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;	/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_sgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);

/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;	/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;		/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;	/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
		    (unsigned long) s_pgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
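
/*
 * Illustrative sketch, not part of the original gmap.c: a fault handler
 * first asks whether a shadow page table already exists and only
 * instantiates one when the lookup reports -EAGAIN. How the parent
 * page-table origin @pgt and @fake are obtained is an assumption here.
 */
static int example_ensure_pgt(struct gmap *sg, unsigned long saddr,
			      unsigned long pgt, int fake)
{
	unsigned long cur_pgt;
	int dat_protection, cur_fake;

	if (!gmap_shadow_pgt_lookup(sg, saddr, &cur_pgt, &dat_protection,
				    &cur_fake))
		return 0;	/* already shadowed */
	return gmap_shadow_pgt(sg, saddr, pgt, fake);
}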

/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
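
/*
 * Illustrative sketch, not part of the original gmap.c: the final step of
 * a shadow fault mirrors one resolved parent pte into the shadow table.
 * Deriving the pte directly from a parent address @paddr plus a protection
 * flag is a simplification of what the real fault handler does.
 */
static int example_shadow_one_page(struct gmap *sg, unsigned long saddr,
				   unsigned long paddr, int dat_protection)
{
	pte_t pte = __pte(paddr & PAGE_MASK);

	if (dat_protection)
		pte = __pte(pte_val(pte) | _PAGE_PROTECT);
	return gmap_shadow_page(sg, saddr, pte);	/* 0 on success */
}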

/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: host virtual address of the invalidated pte
 * @offset: offset of @pte's address within its 1 MB segment
 * @pte: pointer to the invalidated host page table entry
 *
 * Called with sg->parent->shadow_lock held.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long offset, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long gaddr, start, end, bits, raddr;
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->parent->guest_table_lock);
	table = radix_tree_lookup(&sg->parent->host_to_guest,
				  vmaddr >> PMD_SHIFT);
	gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
	spin_unlock(&sg->parent->guest_table_lock);
	if (!table)
		return;

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, offset, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (!(bits & PGSTE_IN_BIT))
			continue;
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (table)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
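
/*
 * Illustrative sketch, not part of the original gmap.c: an invalidation
 * site (in the real kernel this lives in the pgste handling of
 * arch/s390/mm/pgtable.c) masks the notification bits out of the pgste and
 * funnels them into ptep_notify(). The bit extraction shown here is an
 * assumption of this example.
 */
static void example_notify_invalidation(struct mm_struct *mm,
					unsigned long vmaddr, pte_t *ptep,
					unsigned long pgste_val)
{
	unsigned long bits;

	bits = pgste_val & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits)
		ptep_notify(mm, vmaddr, ptep, bits);
}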

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * Switch on pgstes for the current userspace process (for kvm).
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
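
/*
 * Illustrative sketch, not part of the original gmap.c: KVM enables pgstes
 * once per VM, before any gmap is created for the mm. -EINVAL means the mm
 * was set up with 2K page tables and can never run SIE guests.
 */
static int example_vm_init(void)
{
	int rc;

	rc = s390_enable_sie();
	if (rc)
		return rc;
	/* from here on it is safe to create gmaps for current->mm */
	return 0;
}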

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings; after establishing a policy that
	 * forbids zero page mappings, subsequent faults for those pages
	 * will get fresh anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
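
/*
 * Illustrative sketch, not part of the original gmap.c: storage key
 * handling is switched on lazily, the first time a guest touches a key.
 * That the caller retries the intercepted instruction afterwards is an
 * assumption of this example.
 */
static int example_handle_skey_intercept(void)
{
	int rc;

	rc = s390_enable_skey();	/* no-op if already enabled */
	if (rc)
		return rc;	/* -ENOMEM: KSM unmerge failed */
	return 0;	/* caller retries the guest instruction */
}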

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
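
/*
 * Illustrative sketch, not part of the original gmap.c: the same pte_entry
 * walker shape used by s390_enable_skey() and s390_reset_cmma() above,
 * applied to a hypothetical per-pte operation. Passing reset=0 to
 * ptep_zap_unused() (zap without resetting CMMA state) is an assumption of
 * this example.
 */
static int example_zap_one(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 0);
	return 0;
}

static void example_walk(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = example_zap_one };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}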