pgtable.c

/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
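
/*
 * Allocation and freeing of region and segment (crst) tables. A crst
 * table spans 1 << ALLOC_ORDER pages: 16KB / 2048 entries on 64-bit,
 * 8KB on 31-bit. page_to_phys() is used as the table pointer because
 * the kernel runs with an identity mapping of real storage.
 */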
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}
#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		update_mm(mm, current);
	__tlb_flush_local();
}
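
/*
 * crst_table_upgrade - grow the address space to at least @limit
 *
 * Adds top-level region tables, one level at a time (2^42, then 2^53),
 * until the asce_limit covers @limit: the old top table becomes the
 * first child of the newly allocated one. Races are resolved under
 * page_table_lock; a losing allocation is simply freed again. On
 * success every CPU running this mm reloads its ASCE and flushes its
 * TLB via __crst_table_upgrade().
 */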
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > (1UL << 53));
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}
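
/*
 * crst_table_downgrade - shrink the address space to at most @limit
 *
 * Peels off one top-level region table per iteration, making the
 * table's first child the new top level, until asce_limit no longer
 * exceeds @limit. The new ASCE is loaded afterwards if the mm is
 * currently active.
 */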
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm)
		__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		update_mm(mm, current);
}
#endif
#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
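
/*
 * gmap_unlink_segment - disconnect a single gmap segment table entry
 *
 * If @table holds a valid entry, drop the matching rmap from the host
 * page table's mapper list and reset the entry to its invalid form
 * that remembers the host vmaddr. Returns 1 if an entry was unlinked
 * and the caller has to flush the TLB, 0 otherwise. Must be called
 * with page_table_lock held.
 */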
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
	__releases(&gmap->mm->page_table_lock)
	__acquires(&gmap->mm->page_table_lock)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > TASK_MAX_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
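
/*
 * gmap_table_walk - walk the gmap page tables for a guest address
 *
 * Descends the four table levels using 11-bit indices taken at bit
 * positions 53 (region-first), 42 (region-second), 31 (region-third)
 * and 20 (segment). Returns a pointer to the segment table entry, or
 * ERR_PTR(-EFAULT) if an intermediate entry is invalid.
 */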
static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}
/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
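
/*
 * gmap_connect_pgtable - connect a gmap segment entry to a host page table
 *
 * Resolves the host address stored in the invalid @segment entry,
 * allocates the host page table levels if necessary, and, under
 * page_table_lock and only if the entry is still unchanged, points the
 * gmap segment entry at the host page table while recording the link
 * in the page table's mapper list.
 */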
static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}
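
/*
 * gmap_disconnect_pgtable - remove all gmap links to a host page table
 *
 * Resets every gmap segment entry on the page table's mapper list to
 * its invalid "vmaddr" form and flushes the TLB if any entry was
 * disconnected. Called before the host page table is freed.
 */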
static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}
/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
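
/*
 * gmap_zap_swap_entry - release a swap or migration entry
 *
 * Helper for gmap_zap_unused(): decrements the matching mm counter
 * (MM_SWAPENTS, or MM_ANONPAGES/MM_FILEPAGES for migration entries)
 * and frees the swap slot.
 */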
static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	}
	free_swap_and_cache(entry);
}
/*
 * The mm->mmap_sem lock must be held
 */
static void gmap_zap_unused(struct mm_struct *mm, unsigned long address)
{
	unsigned long ptev, pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep, pte;

	ptep = get_locked_pte(mm, address, &ptl);
	if (unlikely(!ptep))
		return;
	pte = *ptep;
	if (!pte_swap(pte))
		goto out_pte;
	/* Zap unused and logically-zero pages */
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	ptev = pte_val(pte);
	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
		gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
		pte_clear(mm, address, ptep);
	}
	pgste_set_unlock(ptep, pgste);
out_pte:
	pte_unmap_unlock(*ptep, ptl);
}
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, *segment_ptr;
	unsigned long segment, pgstev, ptev;
	struct gmap_pgtable *mp;
	struct page *page;

	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return;
	segment = *segment_ptr;
	if (segment & _SEGMENT_ENTRY_INVALID)
		return;
	page = pfn_to_page(segment >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	address = mp->vmaddr | (address & ~PMD_MASK);
	/* Page table is present */
	table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	pgstev = table[PTRS_PER_PTE];
	ptev = table[0];
	/* quick check, checked again with locks held */
	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
		gmap_zap_unused(gmap->mm, address);
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);
/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}
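
/*
 * Page table fragment bookkeeping: page->_mapcount of a page table
 * page holds a bit mask of the 1K/2K fragments in use (see FRAG_MASK).
 * A value of 0 denotes a full 4K page table with pgstes, -1 a page
 * that carries no page table at all.
 */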
static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	if (!pgtable_page_ctor(page)) {
		kfree(mp);
		__free_page(page);
		return NULL;
	}
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
		    PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}
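
/*
 * page_table_reset_pte/pmd/pud - helpers for page_table_reset_pgste()
 *
 * A standard page table walk over [start, end) that clears the guest
 * usage state (_PGSTE_GPS_USAGE_MASK) in the pgste of every mapped
 * pte, e.g. to set all guest pages back to the stable state.
 */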
static inline unsigned long page_table_reset_pte(struct mm_struct *mm,
			pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *start_pte, *pte;
	spinlock_t *ptl;
	pgste_t pgste;

	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = start_pte;
	do {
		pgste = pgste_get_lock(pte);
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
		pgste_set_unlock(pte, pgste);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(start_pte, ptl);

	return addr;
}

static inline unsigned long page_table_reset_pmd(struct mm_struct *mm,
			pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		next = page_table_reset_pte(mm, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static inline unsigned long page_table_reset_pud(struct mm_struct *mm,
			pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_reset_pmd(mm, pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return addr;
}

void page_table_reset_pgste(struct mm_struct *mm,
			    unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pgd_t *pgd;

	addr = start;
	down_read(&mm->mmap_sem);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_reset_pud(mm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	up_read(&mm->mmap_sem);
}
EXPORT_SYMBOL(page_table_reset_pgste);
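
/*
 * set_guest_storage_key - set the guest storage key for a page
 *
 * Folds the ACC/FP bits of @key into the pgste and, for a mapped page,
 * into the real storage key, merging the host's changed and referenced
 * bits back into the pgste. With @nq set the key is written with the
 * nonquiescing variant of SSKE.
 */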
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	/* walk the mm whose mmap_sem is held, not current->mm */
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_HC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */
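
/*
 * atomic_xor_bits - toggle @bits in @v atomically and return the new value
 *
 * Lockless helper to flip fragment bits in page->_mapcount; the
 * cmpxchg loop retries until the update succeeds without interference.
 */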
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}
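
/*
 * RCU-delayed freeing of page tables: the table's type is encoded into
 * the low bits of the pointer handed to tlb_remove_table(). The upper
 * FRAG_MASK nibble (bit << 4) tags a pending 1K/2K fragment, a full
 * FRAG_MASK tags a page table with pgstes, and no tag at all a crst
 * table. __tlb_remove_table() decodes this after the grace period.
 */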
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
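
/*
 * KVM needs 4K page tables with pgstes, which transparent huge pages
 * cannot provide. Before an mm is converted for SIE all thp mappings
 * are therefore split up and further collapsing is disabled.
 */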
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
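
/*
 * page_table_realloc - replace the 2K page tables of an address range
 * with 4K page tables that carry pgstes. The ptes are copied under
 * page_table_lock and the old tables are freed via RCU because
 * concurrent walkers (gup) may still be traversing them.
 */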
static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm, addr);
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);
			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);
			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table);
			new = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				struct mm_struct *mm, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pud++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
					unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pgd++, addr = next, addr != end);

	return 0;
}
/*
 * switch on pgstes for the current userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
		mm->context.has_pgste = 1;
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}
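
/*
 * THP deposit/withdraw: preallocated page tables are queued per pmd by
 * threading a struct list_head through the pgtable itself. Withdraw
 * re-initializes the first two ptes, which the list_head overlapped,
 * to _PAGE_INVALID.
 */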
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */