gmap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016, 2018
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      David Hildenbrand <david@redhat.com>
 *	      Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
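
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a hypervisor such as KVM creates one gmap per guest from the
 * task's mm and tears it down with gmap_remove(). The limit value below
 * is an arbitrary example, not a value taken from this file.
 *
 *	struct gmap *guest_gmap;
 *
 *	guest_gmap = gmap_create(current->mm, (1UL << 44) - 1);
 *	if (!guest_gmap)
 *		return -ENOMEM;
 *	// ...run the guest against guest_gmap...
 *	gmap_remove(guest_gmap);
 */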

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
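
/*
 * Illustrative sketch (editor's addition): gmap_get()/gmap_put() follow
 * the usual reference-counting pattern. A caller that hands a gmap to
 * another context takes an extra reference first:
 *
 *	struct gmap *g = gmap_get(gmap);	// +1 reference
 *	// ...use g from the other context...
 *	gmap_put(g);				// frees g if this was the last reference
 */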

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. NULL if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
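
/*
 * Illustrative sketch (editor's addition): gmap_enable()/gmap_disable()
 * bracket the window in which the guest address space is active on this
 * CPU, e.g. around guest execution:
 *
 *	gmap_enable(gmap);
 *	// ...enter SIE / run the guest on this CPU...
 *	WARN_ON(gmap_get_enabled() != gmap);
 *	gmap_disable(gmap);
 */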

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
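
/*
 * Illustrative sketch (editor's addition): source, target and length must
 * all be segment (PMD_SIZE, 1 MB) aligned. The variable names are made up.
 *
 *	int rc;
 *
 *	// map 16 MB of the parent address space at guest address 0
 *	rc = gmap_map_segment(gmap, from, 0x0UL, 16UL << 20);
 *	if (rc)
 *		return rc;	// -EINVAL or -ENOMEM
 *	// ...
 *	gmap_unmap_segment(gmap, 0x0UL, 16UL << 20);
 */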

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
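
/*
 * Illustrative sketch (editor's addition): the error is encoded in the
 * returned address, so callers test it with IS_ERR_VALUE(), as this file
 * itself does after calling __gmap_translate():
 *
 *	unsigned long vmaddr = gmap_translate(gmap, gaddr);
 *
 *	if (IS_ERR_VALUE(vmaddr))
 *		return (int) vmaddr;	// -EFAULT: no mapping exists
 */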

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_large(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					 | _SEGMENT_ENTRY_GMAP_UC;
			} else
				*table = pmd_val(*pmd) &
					 _SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
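
/*
 * Illustrative sketch (editor's addition): a guest-fault handler would
 * resolve a faulting guest address roughly like this, passing
 * FAULT_FLAG_WRITE only for write accesses:
 *
 *	rc = gmap_fault(gmap, gaddr, is_write ? FAULT_FLAG_WRITE : 0);
 *	if (rc)
 *		// -EFAULT: invalid guest address, -ENOMEM: out of memory
 *		return rc;
 */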

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
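
/*
 * Illustrative sketch (editor's addition): discarding a guest range zaps
 * the backing pages in the parent mm, e.g. one guest page at a time:
 *
 *	gmap_discard(gmap, gaddr, gaddr + PAGE_SIZE);
 */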

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
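
/*
 * Illustrative sketch (editor's addition): a consumer registers a callback
 * that is invoked for invalidated guest ranges (see gmap_call_notifier()
 * below for the calling convention). The handler name is made up.
 *
 *	static void my_gmap_notifier(struct gmap *gmap, unsigned long start,
 *				     unsigned long end)
 *	{
 *		// react to the invalidation of guest range [start, end]
 *	}
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_gmap_notifier,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	// ...
 *	gmap_unregister_pte_notifier(&my_nb);
 */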

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}

/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
	if (!pmdp)
		return NULL;

	/* without huge pages, there is no need to take the table lock */
	if (!gmap->mm->context.allow_gmap_hpage_1m)
		return pmd_none(*pmdp) ? NULL : pmdp;

	spin_lock(&gmap->guest_table_lock);
	if (pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
	if (!pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}

/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}

/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_sem in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	/* Fixup needed */
	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;

	/* Shadow GMAP protection needs split PMDs */
	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;

	return 0;
}

/*
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with sg->mm->mmap_sem in read
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl = NULL;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
	/* Protect and unlock. */
	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptl);
	return rc;
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_large(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			/* -EAGAIN, fixup of userspace mm and gmap */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
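
/*
 * Illustrative sketch (editor's addition): arm the notifier for one guest
 * page by making it read-only; the next write to that page triggers the
 * callbacks registered with gmap_register_pte_notifier():
 *
 *	rc = gmap_mprotect_notify(gmap, gaddr, PAGE_SIZE, PROT_READ);
 *	if (rc)
 *		return rc;	// -EFAULT, -ENOMEM or -EINVAL
 */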

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
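
/*
 * Illustrative sketch (editor's addition): peek at one word of a guest
 * page table without marking the backing page referenced:
 *
 *	unsigned long val;
 *
 *	rc = gmap_read_table(gmap, gaddr, &val);
 *	if (rc)
 *		return rc;	// -EFAULT, -ENOMEM or -EINVAL
 */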

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							     &sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
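
/*
 * Note: the ASCE type encodes how many translation levels sit below the
 * top-level table, which is why the unshadow helpers above walk to levels
 * 4/3/2/1 respectively:
 *
 *	_ASCE_TYPE_REGION1: r1t -> r2t -> r3t -> sgt -> pgt
 *	_ASCE_TYPE_REGION2:        r2t -> r3t -> sgt -> pgt
 *	_ASCE_TYPE_REGION3:               r3t -> sgt -> pgt
 *	_ASCE_TYPE_SEGMENT:                      sgt -> pgt
 */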

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
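
/*
 * Usage sketch (hypothetical VSIE caller, for illustration only): a cached
 * shadow gmap is revalidated before reuse and replaced via gmap_shadow()
 * when it no longer matches the guest's ASCE or edat level:
 *
 *	if (!gmap_shadow_valid(sg, asce, edat_level)) {
 *		gmap_put(sg);
 *		sg = gmap_shadow(parent, asce, edat_level);
 *		if (IS_ERR(sg))
 *			return PTR_ERR(sg);
 *	}
 */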

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, GMAP_NOTIFY_SHADOW);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
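
/*
 * Usage sketch (hypothetical caller): the result must be checked with
 * IS_ERR() before use; the reference taken here (ref_count is set to 2,
 * one for the parent's children list and one for the caller) is dropped
 * with gmap_put() when the shadow is no longer needed:
 *
 *	sg = gmap_shadow(parent, asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	(-ENOMEM, -EAGAIN or -EFAULT)
 *	...
 *	gmap_put(sg);
 */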

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
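
/*
 * Note: gmap_shadow_r2t() and the r3t/sgt/pgt variants below share one
 * handshake: the new table is linked in with the INVALID bit set, the
 * guest_table_lock is dropped while the source table is write-protected,
 * and the entry only becomes valid after a re-walk confirms it still
 * points to the freshly allocated table, roughly:
 *
 *	*table = (unsigned long) s_r2t | ... | _REGION_ENTRY_INVALID;
 *	spin_unlock(&sg->guest_table_lock);
 *	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
 *	spin_lock(&sg->guest_table_lock);
 *	if (!rc) {
 *		table = gmap_table_walk(sg, saddr, 4);
 *		if (table && (*table & _REGION_ENTRY_ORIGIN) ==
 *			     (unsigned long) s_r2t)
 *			*table &= ~_REGION_ENTRY_INVALID;
 *		else
 *			rc = -EAGAIN;	(race with unshadow)
 *	}
 */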

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
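
/*
 * Example (hypothetical shadow fault path): a caller first tries the
 * lookup and only walks the guest's tables to instantiate a new shadow
 * page table when -EAGAIN is returned; "guest_pgt" stands in for the
 * page table origin read from the guest:
 *
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc == -EAGAIN)
 *		rc = gmap_shadow_pgt(sg, saddr, guest_pgt, fake);
 */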

/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);

/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
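
/*
 * Note: gmap_shadow_page() uses the fault-and-retry pattern common in this
 * file: if the parent pte cannot be walked, the fault is resolved with
 * gmap_pte_op_fixup() and the operation is retried, roughly:
 *
 *	while (1) {
 *		rc = -EAGAIN;
 *		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
 *		if (sptep) {
 *			... shadow the pte, set rc ...
 *			gmap_pte_op_end(ptl);
 *		}
 *		if (!rc)
 *			break;
 *		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
 *		if (rc)
 *			break;
 *	}
 */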

/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address
 * @gaddr: affected guest address
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree from one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
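
/*
 * Worked example: a 2K page table holds 256 ptes and maps 1 MB, so the
 * offset computation in ptep_notify() above turns a pte's byte offset
 * within its table into the page offset within the segment. For the 6th
 * pte (index 5):
 *
 *	offset = 5 * sizeof(pte_t);			(= 40)
 *	offset = offset * (PAGE_SIZE / sizeof(pte_t));	(= 5 * PAGE_SIZE)
 *
 * which, added to the segment's guest address, yields the guest address
 * of the page the pte maps.
 */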

static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
			     unsigned long gaddr)
{
	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
}

/**
 * gmap_pmdp_xchg - exchange a gmap pmd with another
 * @gmap: pointer to the guest address space structure
 * @pmdp: pointer to the pmd entry
 * @new: replacement entry
 * @gaddr: the affected guest address
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
			   unsigned long gaddr)
{
	gaddr &= HPAGE_MASK;
	pmdp_notify_gmap(gmap, pmdp, gaddr);
	pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
			    IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	*pmdp = new;
}
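
/*
 * Example (hypothetical caller, guest_table_lock held): write-protecting a
 * huge segment boils down to exchanging the pmd for a copy with the
 * protect bit set, so notification and TLB flushing are never skipped:
 *
 *	pmd_t new = *pmdp;
 *
 *	pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
 *	gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
 */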

static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
			    int purge)
{
	pmd_t *pmdp;
	struct gmap *gmap;
	unsigned long gaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
						  vmaddr >> PMD_SHIFT);
		if (pmdp) {
			gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
						   _SEGMENT_ENTRY_GMAP_UC));
			if (purge)
				__pmdp_csp(pmdp);
			pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}

/**
 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
 *                        flushing
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
{
	gmap_pmdp_clear(mm, vmaddr, 0);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);

/**
 * gmap_pmdp_csp - csp all affected guest pmd entries
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
{
	gmap_pmdp_clear(mm, vmaddr, 1);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_csp);

/**
 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *entry, gaddr;
	struct gmap *gmap;
	pmd_t *pmdp;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		entry = radix_tree_delete(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (entry) {
			pmdp = (pmd_t *)entry;
			gaddr = __gmap_segment_gaddr(entry);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC));
			if (MACHINE_HAS_TLB_GUEST)
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_LOCAL);
			else if (MACHINE_HAS_IDTE)
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
			*entry = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);

/**
 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *entry, gaddr;
	struct gmap *gmap;
	pmd_t *pmdp;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		entry = radix_tree_delete(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (entry) {
			pmdp = (pmd_t *)entry;
			gaddr = __gmap_segment_gaddr(entry);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC));
			if (MACHINE_HAS_TLB_GUEST)
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_GLOBAL);
			else if (MACHINE_HAS_IDTE)
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
			else
				__pmdp_csp(pmdp);
			*entry = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);

/**
 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
 * @gmap: pointer to guest address space
 * @pmdp: pointer to the pmd to be tested
 * @gaddr: virtual address in the guest address space
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
				   unsigned long gaddr)
{
	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return false;

	/* Already protected memory, which did not change, is clean */
	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
	    !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
		return false;

	/* Clear UC indication and reset protection */
	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
	gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
	return true;
}
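
/*
 * Note: dirty tracking for large segments keeps the segment write
 * protected; a guest write first removes the protection and records the
 * change in the software _SEGMENT_ENTRY_GMAP_UC bit. The helper above
 * then reports the segment as dirty, clears the UC bit and re-applies
 * PROT_READ so the next write starts a new tracking interval, e.g.:
 *
 *	if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
 *		bitmap_fill(bitmap, _PAGE_ENTRIES);	(whole segment dirty)
 */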

/**
 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
 * @gmap: pointer to guest address space
 * @bitmap: dirty bitmap for this pmd
 * @gaddr: virtual address in the guest address space
 * @vmaddr: virtual address in the host address space
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr)
{
	int i;
	pmd_t *pmdp;
	pte_t *ptep;
	spinlock_t *ptl;

	pmdp = gmap_pmd_op_walk(gmap, gaddr);
	if (!pmdp)
		return;

	if (pmd_large(*pmdp)) {
		if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
			bitmap_fill(bitmap, _PAGE_ENTRIES);
	} else {
		for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
			ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
			if (!ptep)
				continue;
			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
				set_bit(i, bitmap);
			spin_unlock(ptl);
		}
	}
	gmap_pmd_op_end(gmap, pmdp);
}
EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
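
/*
 * Usage sketch (hypothetical caller, as in a dirty-log pass): the bitmap
 * covers one segment of 256 pages, so guest memory is scanned in
 * HPAGE_SIZE steps, one bitmap per segment:
 *
 *	unsigned long bitmap[4];
 *
 *	bitmap_zero(bitmap, _PAGE_ENTRIES);
 *	gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
 *	... merge bitmap into the caller's dirty log ...
 */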

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
}

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	zap_zero_pages(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
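
/*
 * Example (hypothetical caller, mirroring the kvm usage): s390_enable_sie()
 * is called once per mm before any guest mappings are created:
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;	(-EINVAL: mm uses 2K page tables)
 */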

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
				      unsigned long hmask, unsigned long next,
				      struct mm_walk *walk)
{
	pmd_t *pmd = (pmd_t *)pte;
	unsigned long start, end;
	struct page *page = pmd_page(*pmd);

	/*
	 * The write check makes sure we do not set a key on shared
	 * memory. This is needed as the walker does not differentiate
	 * between actual guest memory and the process executable or
	 * shared libraries.
	 */
	if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
	    !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
		return 0;

	start = pmd_val(*pmd) & HPAGE_MASK;
	end = start + HPAGE_SIZE - 1;
	__storage_key_init_range(start, end);
	set_bit(PG_arch_1, &page->flags);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = {
		.hugetlb_entry = __s390_enable_skey_hugetlb,
		.pte_entry = __s390_enable_skey_pte,
	};
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_uses_skeys(mm))
		goto out_up;

	mm->context.uses_skeys = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.uses_skeys = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
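
/*
 * Example (hypothetical intercept handler): storage keys are enabled
 * lazily, typically on the first key instruction a guest executes:
 *
 *	rc = s390_enable_skey();
 *	if (rc)
 *		return rc;	(-ENOMEM: unmerging KSM pages failed)
 */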

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);