/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
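
/*
 * Usage sketch (editorial example, not part of the original file): a
 * hypothetical caller creating and removing a guest address space. The
 * limit picks the ASCE type in gmap_alloc() above: anything below
 * _REGION3_SIZE yields a segment-table ASCE, and e.g. a limit of
 * (1UL << 42) - 1 lands in the region-3 branch:
 *
 *	struct gmap *g;
 *
 *	g = gmap_create(current->mm, (1UL << 42) - 1);
 *	if (!g)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(g);		drops the initial reference; the gmap is
 *				freed once the refcount reaches zero
 */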

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
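
/*
 * Editorial note with a usage sketch (not part of the original file):
 * gmap_get()/gmap_put() follow the usual take/drop reference pattern.
 * Every gmap_get() must be balanced by a gmap_put(); the final put
 * frees the structure via gmap_free():
 *
 *	struct gmap *g2 = gmap_get(g);	refcount + 1
 *	...	use g2 ...
 *	gmap_put(g2);			refcount - 1, may free
 */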

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
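
/*
 * Usage sketch (editorial, not part of the original file): a vcpu-style
 * caller bracketing guest execution. gmap_enable() merely records the
 * gmap pointer in the lowcore; gmap_get_enabled() reads it back, e.g.
 * from fault handling code that needs the active guest address space.
 *
 *	gmap_enable(g);
 *	...	enter the guest ...
 *	WARN_ON(gmap_get_enabled() != g);
 *	gmap_disable(g);
 */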

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
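
/*
 * Worked example for __gmap_segment_gaddr() (editorial, with made-up
 * numbers): gmap_alloc_table() above stores the guest address covered
 * by a table in page->index. For a segment table whose page->index is
 * 0x80000000 and an @entry pointing at slot 5 of that table, offset
 * becomes 5 * PMD_SIZE = 0x500000, so the function returns the guest
 * address 0x80500000.
 */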

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
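
/*
 * Usage sketch (editorial, not part of the original file): backing
 * 16 MB of guest real memory at 0x10000000 with a range of the parent
 * address space. Both addresses and the length must be PMD_SIZE (1 MB)
 * aligned:
 *
 *	rc = gmap_map_segment(g, from, 0x10000000UL, 16UL << 20);
 *	if (rc)
 *		return rc;
 *	...
 *	gmap_unmap_segment(g, 0x10000000UL, 16UL << 20);
 */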

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
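
/*
 * Usage sketch (editorial): the translation result doubles as an error
 * code, so callers test it with IS_ERR_VALUE() rather than comparing
 * against NULL:
 *
 *	unsigned long vmaddr = gmap_translate(g, gaddr);
 *
 *	if (IS_ERR_VALUE(vmaddr))
 *		return vmaddr;	-EFAULT, no mapping exists
 */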

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
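
/*
 * Usage sketch (editorial, not part of the original file): resolving a
 * guest write fault the way an instruction-emulation path might:
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		...	reflect an addressing exception to the guest
 *	else if (rc)
 *		return rc;	e.g. -ENOMEM
 */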

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		/* only unlock if a pte was found; ptl is not set otherwise */
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
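
/*
 * Usage sketch (editorial; the callback and variable names below are
 * hypothetical): arming a pte invalidation callback. The callback is
 * invoked by gmap_call_notifier() below with the affected guest range.
 *
 *	static void my_notifier(struct gmap *gmap, unsigned long start,
 *				unsigned long end)
 *	{
 *		...	react to the invalidated range ...
 *	}
 *
 *	static struct gmap_notifier my_nb = { .notifier_call = my_notifier };
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	...
 *	gmap_unregister_pte_notifier(&my_nb);
 */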

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
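
/*
 * Editorial example for gmap_table_walk(): with a region-2 type ASCE,
 * level 1 walks region-2 -> region-3 -> segment table and yields a
 * pointer to the segment table entry for @gaddr, or NULL as soon as an
 * intermediate entry is invalid:
 *
 *	unsigned long *ste = gmap_table_walk(gmap, gaddr, 1);
 */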

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
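
/*
 * Usage sketch (editorial, not part of the original file): write
 * protecting a single guest page so that the registered notifiers fire
 * on the next write access to it:
 *
 *	rc = gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 *	if (rc)
 *		return rc;	-EINVAL, -EFAULT or -ENOMEM
 */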

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
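
/*
 * Usage sketch (editorial): fetching one aligned doubleword of guest
 * memory without setting the referenced bit of the backing page:
 *
 *	unsigned long val;
 *
 *	rc = gmap_read_table(g, gaddr & ~7UL, &val);
 *	if (rc)
 *		return rc;
 */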

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							     &sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
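
/*
 * Editorial note (an assumption based on how the rest of gmap.c uses
 * these values): the table-level tag is stored in the low bits of
 * gmap_rmap->raddr, which works because the rmap addresses are at
 * least page aligned. _SHADOW_RMAP_MASK extracts the tag again so the
 * unshadow code knows which table level a queued rmap refers to.
 */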

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
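
/*
 * Usage sketch (editor's example, not part of the original source): a VSIE
 * caller typically reuses its cached shadow gmap while it still matches the
 * guest's ASCE and EDAT level, and otherwise asks gmap_shadow() below for a
 * new one.  "vsie_sg", "parent", "asce" and "edat" are hypothetical
 * caller-side variables:
 *
 *	struct gmap *vsie_sg;
 *
 *	if (!vsie_sg || !gmap_shadow_valid(vsie_sg, asce, edat)) {
 *		vsie_sg = gmap_shadow(parent, asce, edat);
 *		if (IS_ERR(vsie_sg))
 *			return PTR_ERR(vsie_sg); (-EAGAIN means retry later)
 *	}
 */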

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
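
/*
 * Worked example (editor's note): the "limit" computation above derives the
 * shadow address-space size from the ASCE designation type.  The type field
 * ((asce & _ASCE_TYPE_MASK) >> 2) is 0 for a segment table and 1/2/3 for a
 * region third/second/first table, so:
 *
 *	segment table:  -1UL >> 33 =  2 GB - 1
 *	region third:   -1UL >> 22 =  4 TB - 1
 *	region second:  -1UL >> 11 =  8 PB - 1
 *	region first:   -1UL >>  0 = 16 EB - 1
 *
 * i.e. each additional table level multiplies the reachable space by 2^11,
 * matching the 2048 entries of a CRST table.
 */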

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
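
/*
 * Worked example (editor's note): the offset/len computation above protects
 * only the part of the 16 KB source table that the guest designation says
 * is in use.  The table-offset field (_REGION_ENTRY_OFFSET) and the
 * table-length field (_REGION_ENTRY_LENGTH) each count 4 KB pages of the
 * table.  For a designation with TF = 1 and TL = 2:
 *
 *	offset = 1 * PAGE_SIZE                = 4096
 *	len    = (2 + 1) * PAGE_SIZE - offset = 8192
 *
 * so only pages 1 and 2 of the source table are made read-only.
 */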

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
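
/*
 * Note on @fake (editor's sketch of the intended use, not part of the
 * original source): when the parent guest maps a huge block with EDAT,
 * there is no real lower-level table to shadow.  The table walker then
 * passes the absolute address of the memory block with fake = 1, and the
 * shadow code installs an empty table without protecting anything in the
 * parent.  A hypothetical caller handling a 2 GB region-3 entry might do:
 *
 *	rc = gmap_shadow_sgt(sg, saddr,
 *			     rtte & _REGION3_ENTRY_ORIGIN_LARGE, 1);
 *
 * where "rtte" is the guest's region-third-table entry.
 */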

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
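
/*
 * Usage sketch (editor's example, not part of the original source): a fault
 * handler first asks whether the faulting segment already has a shadow page
 * table, and only instantiates one when the lookup fails:
 *
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc) {
 *		resolve pgt and fake from the parent guest's tables, then:
 *		rc = gmap_shadow_pgt(sg, saddr, pgt, fake);
 *	}
 */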

/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);

/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
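
/*
 * Usage sketch (editor's example, not part of the original source): the last
 * step of resolving a shadow fault reads the guest's pte from the parent
 * address space and mirrors it, retrying while the parent mapping changes
 * underneath.  "read_guest_pte" is a hypothetical helper:
 *
 *	do {
 *		rc = read_guest_pte(parent, gaddr, &pte);
 *		if (!rc)
 *			rc = gmap_shadow_page(sg, saddr, pte);
 *	} while (rc == -EAGAIN);
 */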

/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address
 * @gaddr: affected guest address
 * @pte: pointer to the page table entry
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree from one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
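
/*
 * Worked example (editor's note): the two "offset" lines above convert the
 * pte's byte offset inside its 2 KB page table into the guest-address
 * offset inside the 1 MB segment.  With 8-byte ptes, the mask
 * 255 * sizeof(pte_t) = 0x7f8 keeps the byte offset (pte index times 8);
 * multiplying by PAGE_SIZE / sizeof(pte_t) = 512 turns that into
 * index * PAGE_SIZE.  E.g. the pte at index 2 (byte offset 16) yields
 * 16 * 512 = 8192 = 2 * PAGE_SIZE.
 */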

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
}

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	zap_zero_pages(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
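
/*
 * Usage sketch (editor's example, not part of the original source): a
 * hypervisor switches a process to pgste-backed page tables once, before
 * building any SIE control blocks, typically early in VM creation:
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc; (-EINVAL: the mm was created with 2K page tables)
 */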

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);