/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
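
/*
 * ALLOC_ORDER is the allocation order of the 2048-entry region and segment
 * (crst) tables: 8KB (order 1) with 4-byte entries on 31-bit, 16KB (order 2)
 * with 8-byte entries on 64-bit.  FRAG_MASK tracks the page table fragments
 * that share one 4K page: four 1KB tables on 31-bit, two 2KB tables on
 * 64-bit.
 */
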
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}
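
/*
 * On 64-bit the number of page table levels is not fixed: crst_table_upgrade()
 * grows the address space limit from 2GB (segment table as top level) to 4TB
 * (region-third table) or 8PB (region-second table) by stacking a new
 * top-level table on top of mm->pgd; crst_table_downgrade() removes such
 * levels again.
 */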
#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		update_mm(mm, current);
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > (1UL << 53));
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm)
		__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
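
/*
 * The guest address space created above always uses a region-first table as
 * its top level, so gmap addresses are walked through four table levels
 * regardless of how large the parent address space currently is.
 */
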
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
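
/*
 * Note: gmap_enable/gmap_disable only record the currently active guest
 * address space in the lowcore; the low-level fault handling code consults
 * S390_lowcore.gmap to decide whether a fault has to be resolved through the
 * guest mapping while SIE is active.
 */
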
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
	__releases(&gmap->mm->page_table_lock)
	__acquires(&gmap->mm->page_table_lock)
{
	struct page *page;
	unsigned long *new;

	/* since we dont free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > TASK_MAX_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
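
/*
 * gmap_table_walk() walks the four levels of the guest address space: each
 * table has 2048 entries, so the shifts 53, 42, 31 and 20 extract the 11-bit
 * region-first, region-second, region-third and segment table index of a
 * guest address, and the result is a pointer to the segment table entry.
 */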
static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
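
/*
 * Illustrative only (not part of this file): a hypothetical user of the gmap
 * interface, e.g. a KVM-like host, would roughly do
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *
 *	gmap_map_segment(gmap, userspace_addr, guest_addr, size);
 *	gmap_enable(gmap);			// before entering SIE
 *	...
 *	gmap_fault(guest_addr, gmap);		// resolve a guest page fault
 *	...
 *	gmap_disable(gmap);			// after leaving SIE
 *	gmap_free(gmap);
 *
 * where userspace_addr, guest_addr and size are placeholders and error
 * handling is omitted.
 */
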
static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	}
	free_swap_and_cache(entry);
}

/*
 * The mm->mmap_sem lock must be held
 */
static void gmap_zap_unused(struct mm_struct *mm, unsigned long address)
{
	unsigned long ptev, pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep, pte;

	ptep = get_locked_pte(mm, address, &ptl);
	if (unlikely(!ptep))
		return;
	pte = *ptep;
	if (!pte_swap(pte))
		goto out_pte;
	/* Zap unused and logically-zero pages */
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	ptev = pte_val(pte);
	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
		gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
		pte_clear(mm, address, ptep);
	}
	pgste_set_unlock(ptep, pgste);
out_pte:
	pte_unmap_unlock(*ptep, ptl);
}

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, *segment_ptr;
	unsigned long segment, pgstev, ptev;
	struct gmap_pgtable *mp;
	struct page *page;

	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return;
	segment = *segment_ptr;
	if (segment & _SEGMENT_ENTRY_INVALID)
		return;
	page = pfn_to_page(segment >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	address = mp->vmaddr | (address & ~PMD_MASK);
	/* Page table is present */
	table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	pgstev = table[PTRS_PER_PTE];
	ptev = table[0];
	/* quick check, checked again with locks held */
	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
		gmap_zap_unused(gmap->mm, address);
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;
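
	/*
	 * The pte lives in a 2K page table with 256 entries; its byte offset
	 * within that table, scaled by PAGE_SIZE / sizeof(pte_t), yields the
	 * offset of the page within the 1MB segment described by the rmap
	 * entries.
	 */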
	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}

static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	if (!pgtable_page_ctor(page)) {
		kfree(mp);
		__free_page(page);
		return NULL;
	}
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
		    PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

static inline unsigned long page_table_reset_pte(struct mm_struct *mm,
			pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *start_pte, *pte;
	spinlock_t *ptl;
	pgste_t pgste;

	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = start_pte;
	do {
		pgste = pgste_get_lock(pte);
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
		pgste_set_unlock(pte, pgste);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(start_pte, ptl);

	return addr;
}

static inline unsigned long page_table_reset_pmd(struct mm_struct *mm,
			pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		next = page_table_reset_pte(mm, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static inline unsigned long page_table_reset_pud(struct mm_struct *mm,
			pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_reset_pmd(mm, pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return addr;
}

void page_table_reset_pgste(struct mm_struct *mm,
			    unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pgd_t *pgd;

	addr = start;
	down_read(&mm->mmap_sem);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_reset_pud(mm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	up_read(&mm->mmap_sem);
}
EXPORT_SYMBOL(page_table_reset_pgste);

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(current->mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_HC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
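
/*
 * Note on the fragment bookkeeping: for non-pgste page tables, page->_mapcount
 * of the 4K page doubles as an allocation bitmap.  The low FRAG_MASK bits mark
 * the 1K/2K fragments handed out by page_table_alloc(); the next four bits
 * mark fragments whose free is deferred behind an RCU grace period by
 * page_table_free_rcu().  A value of 0 identifies a page table with pgstes,
 * which always owns the full 4K page.
 */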
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}
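
/*
 * The pointer handed to tlb_remove_table() above carries the table type in
 * its low bits: a fragment bit shifted left by four selects the 1K/2K
 * fragment to release after the grace period, FRAG_MASK marks a full pgste
 * page table, and 0 means an order-ALLOC_ORDER crst table.
 */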
static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}
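
/*
 * Page tables with pgstes exist only for 4K pages, so huge pmd mappings have
 * to be split and transparent hugepages have to be disabled for an mm before
 * its page tables can be reallocated for SIE (see s390_enable_sie() below).
 */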
static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm, addr);
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);

			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);

			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);

			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table);
			new = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				   struct mm_struct *mm, pgd_t *pgd,
				   unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pud++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
					unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pgd++, addr = next, addr != end);

	return 0;
}

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
		mm->context.has_pgste = 1;
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
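
/*
 * Illustrative only: a hypothetical caller (in practice the KVM host code
 * when a virtual machine is created) would enable pgstes for its own mm with
 *
 *	if (s390_enable_sie())
 *		return -ENOMEM;
 *
 * before setting up any guest mappings with gmap_alloc()/gmap_map_segment().
 */
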
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */