gmap.c
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
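/*
 * Editor's note (worked example, not part of the original file): the
 * limit checks above pick the smallest table type that covers the
 * requested address space, rounding the limit up to that type's span.
 * For instance, a caller asking for a 4 GB guest (limit = (1UL << 32) - 1)
 * falls into the "< 1UL << 42" case and therefore gets:
 *
 *	limit = (1UL << 42) - 1;	// rounded up to 4 TB - 1
 *	atype = _ASCE_TYPE_REGION3;	// region-third-table ASCE
 *	etype = _REGION3_ENTRY_EMPTY;	// initial fill for the CRST pages
 */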
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
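/*
 * Editor's illustration (hypothetical caller, not part of the original
 * file): a minimal gmap lifecycle as a KVM-like user of this API might
 * drive it. All names prefixed "example_" are invented; error handling
 * is elided.
 */
#if 0	/* example only, not compiled */
static void example_gmap_lifecycle(struct mm_struct *mm)
{
	/* create a gmap covering up to 4 TB of guest addresses */
	struct gmap *g = gmap_create(mm, (1UL << 42) - 1);

	if (!g)
		return;
	gmap_enable(g);		/* record as current guest space (S390_lowcore.gmap) */
	/* ... run the guest, resolving faults via gmap_fault() ... */
	gmap_disable(g);
	gmap_remove(g);		/* drops the initial reference */
}
#endif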
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
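/*
 * Editor's note (worked example, not part of the original file):
 * page->index holds the guest address of the segment table's first
 * entry, so the reverse translation is pure arithmetic. For a segment
 * table whose page->index is 0x80000000, an @entry pointer 8 entries
 * into the table yields offset = 8 * PMD_SIZE = 8 MB, and the function
 * returns 0x80800000.
 */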
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
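/*
 * Editor's illustration (hypothetical values, not part of the original
 * file): map one PMD_SIZE segment of the parent address space at guest
 * address 0 and translate a guest address back. Both addresses must be
 * PMD_SIZE-aligned, per the checks above; "host_area" is an invented
 * name for an aligned region in the parent mm.
 */
#if 0	/* example only, not compiled */
static void example_map_and_translate(struct gmap *g, unsigned long host_area)
{
	unsigned long vmaddr;

	if (gmap_map_segment(g, host_area, 0x0UL, PMD_SIZE))
		return;
	vmaddr = gmap_translate(g, 0x1000);	/* -> host_area + 0x1000 */
	(void) vmaddr;
}
#endif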
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
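/*
 * Editor's note (worked example, not part of the original file): the
 * shift/mask pairs above slice a 64-bit guest address into 11-bit table
 * indices, matching the s390 DAT layout:
 *
 *	(gaddr >> 53) & 0x7ff	region-1 index	(8 PB steps)
 *	(gaddr >> 42) & 0x7ff	region-2 index	(4 TB steps)
 *	(gaddr >> 31) & 0x7ff	region-3 index	(2 GB steps)
 *	(gaddr >> 20) & 0x7ff	segment index	(1 MB steps)
 *
 * E.g. gaddr = 0x0000040080100000 decomposes into region-1 index 0,
 * region-2 index 1, region-3 index 1, segment index 1, offset 0.
 */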
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during the
	 * fault-in, redo __gmap_translate to not race with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
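/*
 * Editor's illustration (hypothetical caller, not part of the original
 * file): resolving a guest write fault might look like
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *
 * where a non-zero rc would typically be turned into an exception for
 * the guest by the caller.
 */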
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
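/*
 * Editor's illustration (hypothetical user, not part of the original
 * file): a consumer registers a notifier block whose callback receives
 * the invalidated guest address range. The callback signature is
 * inferred from the call site in gmap_call_notifier() above.
 */
#if 0	/* example only, not compiled */
static void example_notifier_call(struct gmap *gmap, unsigned long start,
				  unsigned long end)
{
	/* react to the invalidation of [start, end] in the guest space */
}

static struct gmap_notifier example_notifier = {
	.notifier_call = example_notifier_call,
};

/* gmap_register_pte_notifier(&example_notifier); */
#endif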
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr)
{
	struct mm_struct *mm = gmap->mm;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
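/*
 * Editor's note (not part of the original file): the loop above is the
 * walk/fixup retry pattern used throughout this file. The pte walk is
 * tried first; when it fails (rc stays -EAGAIN) the guest address is
 * translated, gmap_pte_op_fixup() faults the page in and links the gmap
 * segment table, and the same page is then retried before advancing.
 */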
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
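/*
 * Editor's illustration (hypothetical values, not part of the original
 * file): write-protect one guest page and arm the PGSTE_IN_BIT notifier,
 * so that a later change to that mapping invokes the registered
 * callbacks:
 *
 *	rc = gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 */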
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
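/*
 * Editor's note (not part of the original file): host_to_rmap keeps one
 * radix tree slot per host page; multiple rmaps for the same page are
 * chained through rmap->next, with the newest entry stored in the slot
 * itself. gmap_rmap_radix_tree_free() above walks exactly this chain.
 */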
/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
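/*
 * Editor's note (assumption, not part of the original file): the .insn
 * above appears to encode the IDTE instruction (invalidate DAT table
 * entry, opcode 0xb98e), which is why @asce must carry the table origin
 * plus the table-type bits rather than a full ASCE.
 */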
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->removed)
			continue;
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, NULL if out of memory or if
 * anything goes wrong while protecting the top level pages.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	new = gmap_alloc(limit);
	if (!new)
		return NULL;
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	if (!rc) {
		atomic_set(&new->ref_count, 2);
		spin_lock(&parent->shadow_lock);
		/* Recheck if another CPU created the same shadow */
		sg = gmap_find_shadow(parent, asce);
		if (!sg) {
			list_add(&new->list, &parent->children);
			sg = new;
			new = NULL;
		}
		spin_unlock(&parent->shadow_lock);
	}
	if (new)
		gmap_free(new);
	return sg;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
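/*
 * Editor's illustration (hypothetical VSIE-like caller, not part of the
 * original file): gmap_shadow() hands back a shadow with an extra
 * reference held for the caller (ref_count is 2 on creation: one for
 * the parent's children list, one for the caller), so the caller drops
 * it with gmap_put() when done:
 *
 *	struct gmap *sg = gmap_shadow(parent, nested_asce);
 *	if (sg) {
 *		// populate via gmap_shadow_r2t()/gmap_shadow_r3t()/...
 *		// as shadow faults occur, then release:
 *		gmap_put(sg);
 *	}
 */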
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
  1372. int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
  1373. {
  1374. unsigned long raddr, origin, offset, len;
  1375. unsigned long *s_r2t, *table;
  1376. struct page *page;
  1377. int rc;
  1378. BUG_ON(!gmap_is_shadow(sg));
  1379. /* Allocate a shadow region second table */
  1380. page = alloc_pages(GFP_KERNEL, 2);
  1381. if (!page)
  1382. return -ENOMEM;
  1383. page->index = r2t & _REGION_ENTRY_ORIGIN;
  1384. s_r2t = (unsigned long *) page_to_phys(page);
  1385. /* Install shadow region second table */
  1386. spin_lock(&sg->guest_table_lock);
  1387. table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
  1388. if (!table) {
  1389. rc = -EAGAIN; /* Race with unshadow */
  1390. goto out_free;
  1391. }
  1392. if (!(*table & _REGION_ENTRY_INVALID)) {
  1393. rc = 0; /* Already established */
  1394. goto out_free;
  1395. } else if (*table & _REGION_ENTRY_ORIGIN) {
  1396. rc = -EAGAIN; /* Race with shadow */
  1397. goto out_free;
  1398. }
  1399. crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
  1400. /* mark as invalid as long as the parent table is not protected */
  1401. *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
  1402. _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
  1403. list_add(&page->lru, &sg->crst_list);
  1404. spin_unlock(&sg->guest_table_lock);
  1405. /* Make r2t read-only in parent gmap page table */
  1406. raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
  1407. origin = r2t & _REGION_ENTRY_ORIGIN;
  1408. offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
  1409. len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
  1410. rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
  1411. spin_lock(&sg->guest_table_lock);
  1412. if (!rc) {
  1413. table = gmap_table_walk(sg, saddr, 4);
  1414. if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
  1415. (unsigned long) s_r2t)
  1416. rc = -EAGAIN; /* Race with unshadow */
  1417. else
  1418. *table &= ~_REGION_ENTRY_INVALID;
  1419. } else {
  1420. gmap_unshadow_r2t(sg, raddr);
  1421. }
  1422. spin_unlock(&sg->guest_table_lock);
  1423. return rc;
  1424. out_free:
  1425. spin_unlock(&sg->guest_table_lock);
  1426. __free_pages(page, 2);
  1427. return rc;
  1428. }
  1429. EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
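
/*
 * All of the gmap_shadow_r2t/r3t/sgt/pgt installers follow the same
 * protocol: allocate the shadow table, link it into the parent entry
 * with the INVALID bit still set, drop guest_table_lock, protect the
 * source table read-only via gmap_protect_rmap(), then retake the lock
 * and only clear the invalid bit if the entry still points to the
 * table that was just installed (otherwise an unshadow raced and won).
 */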

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
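
/*
 * The saddr masks used for the rmap addresses select exactly the
 * address bits translated by the levels *above* the table being
 * shadowed: a region-1 entry maps 2^53 bytes, so gmap_shadow_r2t()
 * keeps the top 11 bits (0xffe0000000000000); a region-2 entry maps
 * 2^42 bytes, so gmap_shadow_r3t() keeps the top 22 bits
 * (0xfffffc0000000000); region-3 and segment entries map 2 GB and
 * 1 MB, giving the 33- and 44-bit masks used by gmap_shadow_sgt() and
 * gmap_shadow_pgt() below. The low bits carry the _SHADOW_RMAP_* level
 * tag instead.
 */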

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: set if the page table is DAT-protected
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		rc = 0;
	} else	{
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
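
/*
 * The @pgt value recovered above comes from page->index, which
 * gmap_shadow_pgt() below stores when the shadow page table is
 * allocated; every shadow table page in this file records the origin
 * of its parent-gmap source table in page->index for exactly this kind
 * of reverse lookup.
 */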

/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);

/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
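
/*
 * The loop above is the usual fault/retry pattern: resolve the parent
 * pte under its page table lock and try to shadow it; if that fails
 * with -EAGAIN because the parent mapping is not (yet) usable,
 * gmap_pte_op_fixup() faults the page in and the operation is retried.
 * The preallocated rmap is only consumed on success, so a single
 * kfree(rmap) at the end covers every exit path (kfree(NULL) is a
 * no-op).
 */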

/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address
 * @offset: offset of the changed guest address within the segment
 * @pte: pointer to the invalidated pte
 *
 * Called with sg->parent->shadow_lock held.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long offset, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long gaddr, start, end, bits, raddr;
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->parent->guest_table_lock);
	table = radix_tree_lookup(&sg->parent->host_to_guest,
				  vmaddr >> PMD_SHIFT);
	gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
	spin_unlock(&sg->parent->guest_table_lock);
	if (!table)
		return;
	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
	if (gaddr >= start && gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree from one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
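
/*
 * Each rmap entry stores a _SHADOW_RMAP_* tag in the low bits of the
 * shadow address, recording which table level was protected at that
 * host address; the switch above uses that tag to tear down exactly
 * the affected subtree. E.g. an rmap tagged _SHADOW_RMAP_REGION1 was
 * created by gmap_shadow_r2t(), so the shadow r2 table at raddr is
 * removed.
 */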

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, offset, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (!(bits & PGSTE_IN_BIT))
			continue;
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (table)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
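
/*
 * Worked example for the offset computation in ptep_notify(): s390
 * page tables hold 256 eight-byte entries, so masking the pte pointer
 * with 255 * sizeof(pte_t) yields its byte offset within the table.
 * Each entry maps 4096 bytes, and since index = byte_offset / 8, the
 * guest offset index * 4096 equals byte_offset * (4096 / sizeof(pte_t));
 * a pte at byte offset 0x10 (index 2) thus yields a segment offset of
 * 0x2000.
 */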

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
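
/*
 * Splitting is needed because the gmap code tracks guest state in
 * per-4K-page PGSTEs that accompany ordinary ptes; huge pmd mappings
 * have no such entries, so every existing THP mapping is split via
 * FOLL_SPLIT and VM_NOHUGEPAGE prevents new ones from forming.
 */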

/*
 * Switch on pgstes for the userspace process (for kvm).
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
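
/*
 * s390_enable_sie() is typically called once per virtual machine
 * before the first SIE entry (KVM does this when a VM is created);
 * calling it again is harmless because the mm_has_pgste() check above
 * makes it idempotent.
 */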

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings; with a policy in place that
	 * forbids zero page mappings, later faults on these addresses
	 * will get fresh anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
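
/*
 * KSM is disabled here (MADV_UNMERGEABLE and clearing VM_MERGEABLE)
 * because storage keys are a per-physical-page property: a KSM page
 * shared between guests could not hold a separate key for each of
 * them.
 */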

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);