/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
        PG_DIRECT_MAP_4K = 0,
        PG_DIRECT_MAP_1M,
        PG_DIRECT_MAP_2G,
        PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
        if (IS_ENABLED(CONFIG_PROC_FS))
                atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
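
/*
 * Illustrative only (not part of the original header): the direct-mapping
 * code is expected to keep these counters in sync whenever it maps or splits
 * a block, e.g. when one 1 MB segment mapping is split into 4 KB pages.
 * A minimal sketch:
 */
#if 0
update_page_count(PG_DIRECT_MAP_1M, -1);        /* one 1 MB mapping gone */
update_page_count(PG_DIRECT_MAP_4K, 256);       /* replaced by 256 4 KB pages */
#endif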

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)            do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)        do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + \
         (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
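
/*
 * Hedged sketch of what ZERO_PAGE computes: with colored zero pages,
 * empty_zero_page is the base of an array of zero pages, and zero_page_mask
 * selects the one whose cache color matches the faulting user address:
 */
#if 0
/* e.g. with four colors, zero_page_mask == 3 * PAGE_SIZE */
unsigned long color_off = vaddr & zero_page_mask;
struct page *zp = virt_to_page((void *)(empty_zero_page + color_off));
#endif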

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
        printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64 bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter-module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR   MODULES_VADDR
#define MODULES_END     MODULES_END
#define MODULES_LEN     (1UL << 31)

static inline int is_module_addr(void *addr)
{
        BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
        if (addr < (void *)MODULES_VADDR)
                return 0;
        if (addr > (void *)MODULES_END)
                return 0;
        return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |                    PFRA                          |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                            |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                            |  TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                         |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC    0x100           /* HW no-execute bit */
#define _PAGE_PROTECT   0x200           /* HW read-only bit */
#define _PAGE_INVALID   0x400           /* HW invalid bit */
#define _PAGE_LARGE     0x800           /* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT   0x001           /* SW pte present bit */
#define _PAGE_YOUNG     0x004           /* SW pte young bit */
#define _PAGE_DIRTY     0x008           /* SW pte dirty bit */
#define _PAGE_READ      0x010           /* SW pte read bit */
#define _PAGE_WRITE     0x020           /* SW pte write bit */
#define _PAGE_SPECIAL   0x040           /* SW associated with special page */
#define _PAGE_UNUSED    0x080           /* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002          /* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK          (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
                                 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *                              842100000000
 *                              000084210000
 *                              000000008421
 *                              .IR.uswrdy.p
 * empty                        .10.00000000
 * swap                         .11..ttttt.0
 * prot-none, clean, old        .11.xx0000.1
 * prot-none, clean, young      .11.xx0001.1
 * prot-none, dirty, old        .11.xx0010.1
 * prot-none, dirty, young      .11.xx0011.1
 * read-only, clean, old        .11.xx0100.1
 * read-only, clean, young      .01.xx0101.1
 * read-only, dirty, old        .11.xx0110.1
 * read-only, dirty, young      .01.xx0111.1
 * read-write, clean, old       .11.xx1100.1
 * read-write, clean, young     .01.xx1101.1
 * read-write, dirty, old       .10.xx1110.1
 * read-write, dirty, young     .00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *          u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
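
/*
 * Hedged sanity sketch (not from the original header): the three predicates
 * documented above, spelled out against the bit definitions. The inline
 * functions pte_none(), pte_swap() and pte_present() further down implement
 * exactly these tests.
 */
#if 0
int none    = pte_val(pte) == _PAGE_INVALID;                    /* == 0x400 */
int swapped = (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
              == _PAGE_PROTECT;                                 /* 0x200 of 0x201 */
int present = (pte_val(pte) & _PAGE_PRESENT) != 0;              /* bit 0x001 */
#endif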

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN            ~0xfffUL /* region/segment table origin */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40    /* space switch event */
#define _ASCE_REAL_SPACE        0x20    /* real space control */
#define _ASCE_TYPE_MASK         0x0c    /* asce table type mask */
#define _ASCE_TYPE_REGION1      0x0c    /* region first table type */
#define _ASCE_TYPE_REGION2      0x08    /* region second table type */
#define _ASCE_TYPE_REGION3      0x04    /* region third table type */
#define _ASCE_TYPE_SEGMENT      0x00    /* segment table type */
#define _ASCE_TABLE_LENGTH      0x03    /* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN    ~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT   0x200   /* region protection bit */
#define _REGION_ENTRY_NOEXEC    0x100   /* region no-execute bit */
#define _REGION_ENTRY_OFFSET    0xc0    /* region table offset */
#define _REGION_ENTRY_INVALID   0x20    /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c    /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1   0x0c    /* region first table type */
#define _REGION_ENTRY_TYPE_R2   0x08    /* region second table type */
#define _REGION_ENTRY_TYPE_R3   0x04    /* region third table type */
#define _REGION_ENTRY_LENGTH    0x03    /* region table length */

#define _REGION1_ENTRY          (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY          (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY          (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY    0x2000  /* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG    0x1000  /* SW region young bit */
#define _REGION3_ENTRY_LARGE    0x0400  /* RTTE-format control, large page */
#define _REGION3_ENTRY_READ     0x0002  /* SW region read bit */
#define _REGION3_ENTRY_WRITE    0x0001  /* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS       0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS                     0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE               0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS            0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE      0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE     ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffUL /* page table origin */
#define _SEGMENT_ENTRY_PROTECT  0x200   /* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC   0x100   /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID  0x20    /* invalid segment table entry */

#define _SEGMENT_ENTRY          (0)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY    0x2000  /* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG    0x1000  /* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE    0x0400  /* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE    0x0002  /* SW segment write bit */
#define _SEGMENT_ENTRY_READ     0x0001  /* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES   2048    /* number of region/segment table entries */
#define _PAGE_ENTRIES   256     /* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT  53
#define _REGION2_SHIFT  42
#define _REGION3_SHIFT  31
#define _SEGMENT_SHIFT  20

#define _REGION1_INDEX  (0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX  (0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX  (0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX  (0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX     (0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE   (1UL << _REGION1_SHIFT)
#define _REGION2_SIZE   (1UL << _REGION2_SHIFT)
#define _REGION3_SIZE   (1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE   (1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK   (~(_REGION1_SIZE - 1))
#define _REGION2_MASK   (~(_REGION2_SIZE - 1))
#define _REGION3_MASK   (~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK   (~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT       _SEGMENT_SHIFT
#define PUD_SHIFT       _REGION3_SHIFT
#define P4D_SHIFT       _REGION2_SHIFT
#define PGDIR_SHIFT     _REGION1_SHIFT

#define PMD_SIZE        _SEGMENT_SIZE
#define PUD_SIZE        _REGION3_SIZE
#define P4D_SIZE        _REGION2_SIZE
#define PGDIR_SIZE      _REGION1_SIZE

#define PMD_MASK        _SEGMENT_MASK
#define PUD_MASK        _REGION3_MASK
#define P4D_MASK        _REGION2_MASK
#define PGDIR_MASK      _REGION1_MASK

#define PTRS_PER_PTE    _PAGE_ENTRIES
#define PTRS_PER_PMD    _CRST_ENTRIES
#define PTRS_PER_PUD    _CRST_ENTRIES
#define PTRS_PER_P4D    _CRST_ENTRIES
#define PTRS_PER_PGD    _CRST_ENTRIES

#define MAX_PTRS_PER_P4D        PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *                              dy..R...I...wr
 * prot-none, clean, old        00..1...1...00
 * prot-none, clean, young      01..1...1...00
 * prot-none, dirty, old        10..1...1...00
 * prot-none, dirty, young      11..1...1...00
 * read-only, clean, old        00..1...1...01
 * read-only, clean, young      01..1...0...01
 * read-only, dirty, old        10..1...1...01
 * read-only, dirty, young      11..1...0...01
 * read-write, clean, old       00..1...1...11
 * read-write, clean, young     01..1...0...11
 * read-write, dirty, old       10..0...1...11
 * read-write, dirty, young     11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
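
/*
 * Hedged reading of the table above: the HW PROTECT bit may be cleared by a
 * dirty transition only together with the SW write bit, and the HW INVALID
 * bit may be cleared by a young transition only together with the SW read
 * bit. pmd_mkdirty()/pmd_mkyoung() below implement exactly this coupling:
 */
#if 0
pmd = pmd_mkdirty(pmd); /* clears _SEGMENT_ENTRY_PROTECT only if WRITE is set */
pmd = pmd_mkyoung(pmd); /* clears _SEGMENT_ENTRY_INVALID only if READ is set */
#endif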

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS  0xf000000000000000UL
#define PGSTE_FP_BIT    0x0800000000000000UL
#define PGSTE_PCL_BIT   0x0080000000000000UL
#define PGSTE_HR_BIT    0x0040000000000000UL
#define PGSTE_HC_BIT    0x0020000000000000UL
#define PGSTE_GR_BIT    0x0004000000000000UL
#define PGSTE_GC_BIT    0x0002000000000000UL
#define PGSTE_UC_BIT    0x0000800000000000UL    /* user dirty (migration) */
#define PGSTE_IN_BIT    0x0000400000000000UL    /* IPTE notify bit */
#define PGSTE_VSIE_BIT  0x0000200000000000UL    /* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO                 0x0000000080000000UL
#define _PGSTE_GPS_NODAT                0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK           0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE         0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED         0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE   0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE       _PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS         (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
                                 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO         __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX         __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW         __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
                                 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_RO
#define __P010  PAGE_RO
#define __P011  PAGE_RO
#define __P100  PAGE_RX
#define __P101  PAGE_RX
#define __P110  PAGE_RX
#define __P111  PAGE_RX

#define __S000  PAGE_NONE
#define __S001  PAGE_RO
#define __S010  PAGE_RW
#define __S011  PAGE_RW
#define __S100  PAGE_RX
#define __S101  PAGE_RX
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX
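
/*
 * Hedged sketch: generic mm code indexes its protection_map[] with the
 * shared/exec/write/read bits of the VMA flags to pick one of the __Pxxx /
 * __Sxxx entries above. A private PROT_READ|PROT_WRITE mapping, for example,
 * resolves to __P011 == PAGE_RO, so the first store faults and copy-on-write
 * can run:
 */
#if 0
pgprot_t prot = protection_map[VM_READ | VM_WRITE];     /* __P011 == PAGE_RO */
#endif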

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE    __pgprot(_SEGMENT_ENTRY_INVALID | \
                                 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO      __pgprot(_SEGMENT_ENTRY_PROTECT | \
                                 _SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX      __pgprot(_SEGMENT_ENTRY_PROTECT | \
                                 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW      __pgprot(_SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE | \
                                 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX     __pgprot(_SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL  __pgprot(_SEGMENT_ENTRY | \
                                 _SEGMENT_ENTRY_LARGE | \
                                 _SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE | \
                                 _SEGMENT_ENTRY_YOUNG | \
                                 _SEGMENT_ENTRY_DIRTY | \
                                 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
                                 _SEGMENT_ENTRY_LARGE | \
                                 _SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_YOUNG | \
                                 _SEGMENT_ENTRY_PROTECT | \
                                 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
                                 _SEGMENT_ENTRY_LARGE | \
                                 _SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE | \
                                 _SEGMENT_ENTRY_YOUNG | \
                                 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL  __pgprot(_REGION_ENTRY_TYPE_R3 | \
                                 _REGION3_ENTRY_LARGE | \
                                 _REGION3_ENTRY_READ | \
                                 _REGION3_ENTRY_WRITE | \
                                 _REGION3_ENTRY_YOUNG | \
                                 _REGION3_ENTRY_DIRTY | \
                                 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
                                   _REGION3_ENTRY_LARGE | \
                                   _REGION3_ENTRY_READ | \
                                   _REGION3_ENTRY_YOUNG | \
                                   _REGION_ENTRY_PROTECT | \
                                   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.has_pgste))
                return 1;
#endif
        return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.alloc_pgste))
                return 1;
#endif
        return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste

static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (mm->context.uses_skeys)
                return 1;
#endif
        return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
        register unsigned long reg2 asm("2") = old;
        register unsigned long reg3 asm("3") = new;
        unsigned long address = (unsigned long)ptr | 1;

        asm volatile(
                "       csp     %0,%3"
                : "+d" (reg2), "+m" (*ptr)
                : "d" (reg3), "d" (address)
                : "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
        register unsigned long reg2 asm("2") = old;
        register unsigned long reg3 asm("3") = new;
        unsigned long address = (unsigned long)ptr | 1;

        asm volatile(
                "       .insn   rre,0xb98a0000,%0,%3"
                : "+d" (reg2), "+m" (*ptr)
                : "d" (reg3), "d" (address)
                : "cc");
}

#define CRDTE_DTT_PAGE          0x00UL
#define CRDTE_DTT_SEGMENT       0x10UL
#define CRDTE_DTT_REGION3       0x14UL
#define CRDTE_DTT_REGION2       0x18UL
#define CRDTE_DTT_REGION1       0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
                         unsigned long table, unsigned long dtt,
                         unsigned long address, unsigned long asce)
{
        register unsigned long reg2 asm("2") = old;
        register unsigned long reg3 asm("3") = new;
        register unsigned long reg4 asm("4") = table | dtt;
        register unsigned long reg5 asm("5") = address;

        asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
                     : "+d" (reg2)
                     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
                     : "memory", "cc");
}
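
/*
 * Hedged usage sketch (illustrative only, not an API defined here): crdte
 * compares and replaces a table entry and purges matching TLB entries as one
 * unit. Marking a segment table entry invalid could look like this, with
 * mm->context.asce assumed to hold the address-space-control element:
 */
#if 0
unsigned long sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);

crdte(pmd_val(*pmdp), pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID,
      sto, CRDTE_DTT_SEGMENT, addr, mm->context.asce);
#endif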

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
        return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
        if (pgd_folded(pgd))
                return 1;
        return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
        if (pgd_folded(pgd))
                return 0;
        return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
        /*
         * With dynamic page table levels the pgd can be a region table
         * entry or a segment table entry. Check for the bits that are
         * invalid for either table entry.
         */
        unsigned long mask =
                ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;

        return (pgd_val(pgd) & mask) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
        return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
        if (p4d_folded(p4d))
                return 1;
        return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
        if (p4d_folded(p4d))
                return 0;
        return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
        return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
        if (pud_folded(pud))
                return 1;
        return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
        if (pud_folded(pud))
                return 0;
        return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
                return 0;
        return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        if (pud_large(pud))
                origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
        return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
        if (pmd_large(pmd))
                return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
        return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
                return pmd_bad(__pmd(pud_val(pud)));
        if (pud_large(pud))
                return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
        return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
        if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
                return pud_bad(__pud(p4d_val(p4d)));
        return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        unsigned long origin_mask;

        origin_mask = _SEGMENT_ENTRY_ORIGIN;
        if (pmd_large(pmd))
                origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
        return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
        int dirty = 1;

        if (pmd_large(pmd))
                dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
        return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
        int young = 1;

        if (pmd_large(pmd))
                young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
        return young;
}

static inline int pte_present(pte_t pte)
{
        /* Bit pattern: (pte & 0x001) == 0x001 */
        return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
        /* Bit pattern: pte == 0x400 */
        return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
        /* Bit pattern: (pte & 0x201) == 0x200 */
        return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
                == _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
        return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
        return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
        /* pmd_large(pmd) implies pmd_present(pmd) */
        return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_SOFT_DIRTY;
        return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
        return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
        return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
        return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Behaviour is undefined if not.
 */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
        return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
        return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
        return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
                pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
        if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Behaviour is undefined if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) &= _PAGE_CHG_MASK;
        pte_val(pte) |= pgprot_val(newprot);
        /*
         * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
         * has the invalid bit set, clear it again for readable, young pages
         */
        if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
                pte_val(pte) &= ~_PAGE_INVALID;
        /*
         * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
         * protection bit set, clear it again for writable, dirty pages
         */
        if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        pte_val(pte) |= _PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_DIRTY)
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        pte_val(pte) |= _PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_YOUNG;
        pte_val(pte) |= _PAGE_INVALID;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_YOUNG;
        if (pte_val(pte) & _PAGE_READ)
                pte_val(pte) &= ~_PAGE_INVALID;
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte_val(pte) |= _PAGE_SPECIAL;
        return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
        pte_val(pte) |= _PAGE_LARGE;
        return pte;
}
#endif

#define IPTE_GLOBAL     0
#define IPTE_LOCAL      1

#define IPTE_NODAT      0x400
#define IPTE_GUEST_ASCE 0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
                               unsigned long opt, unsigned long asce,
                               int local)
{
        unsigned long pto = (unsigned long) ptep;

        if (__builtin_constant_p(opt) && opt == 0) {
                /* Invalidation + TLB flush for the pte */
                asm volatile(
                        "       .insn   rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
                        : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
                          [m4] "i" (local));
                return;
        }

        /* Invalidate ptes with options + TLB flush of the ptes */
        opt = opt | (asce & _ASCE_ORIGIN);
        asm volatile(
                "       .insn   rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
                : [r2] "+a" (address), [r3] "+a" (opt)
                : [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
                                     pte_t *ptep, int local)
{
        unsigned long pto = (unsigned long) ptep;

        /* Invalidate a range of ptes + TLB flush of the ptes */
        do {
                asm volatile(
                        "       .insn   rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
                        : [r2] "+a" (address), [r3] "+a" (nr)
                        : [r1] "a" (pto), [m4] "i" (local) : "memory");
        } while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way to implement this is to have
 * ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
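
/*
 * Hedged illustration of the common-code sequence described above, using the
 * helpers declared below; step 1 already flushes the TLB on s390, so step 3
 * is a nop here:
 */
#if 0
pte_t old = ptep_get_and_clear(mm, addr, ptep); /* clears pte + flushes TLB */
set_pte_at(mm, addr, ptep, newpte);             /* install the new pte */
flush_tlb_range(vma, start, end);               /* nop on s390 */
#endif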

pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
        return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
{
        return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep)
{
        return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr,
                                            pte_t *ptep, int full)
{
        if (full) {
                pte_t pte = *ptep;

                *ptep = __pte(_PAGE_INVALID);
                return pte;
        }
        return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (pte_write(pte))
                ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long addr, pte_t *ptep,
                                        pte_t entry, int dirty)
{
        if (pte_same(*ptep, entry))
                return 0;
        ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
        return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
                 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
                    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
                    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
                            pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                               unsigned char key, unsigned char *oldkey,
                               bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
                   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
                       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t entry)
{
        if (!MACHINE_HAS_NX)
                pte_val(entry) &= ~_PAGE_NOEXEC;
        if (pte_present(entry))
                pte_val(entry) &= ~_PAGE_UNUSED;
        if (mm_has_pgste(mm))
                ptep_set_pte_at(mm, addr, ptep, entry);
        else
                *ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;

        pte_val(__pte) = physpage + pgprot_val(pgprot);
        return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        unsigned long physpage = page_to_phys(page);
        pte_t __pte = mk_pte_phys(physpage, pgprot);

        if (pte_write(__pte) && PageDirty(page))
                __pte = pte_mkdirty(__pte);
        return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
        p4d_t *p4d = (p4d_t *) pgd;

        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
                p4d = (p4d_t *) pgd_deref(*pgd);
        return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
        pud_t *pud = (pud_t *) p4d;

        if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pud = (pud_t *) p4d_deref(*p4d);
        return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) pud;

        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pmd = (pmd_t *) pud_deref(*pud);
        return pmd + pmd_index(address);
}

#define pfn_pte(pfn, pgprot)    mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)      (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)     pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)   pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)   pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)   pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd)   pfn_to_page(pgd_pfn(pgd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr)   ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
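
/*
 * Hedged sketch of a full software page table walk with the helpers above;
 * on s390 each *_offset() step transparently skips levels that are folded
 * for this mm:
 */
#if 0
pgd_t *pgd = pgd_offset(mm, addr);
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
pmd_t *pmd = pmd_offset(pud, addr);
pte_t *pte = pte_offset_map(pmd, addr);
#endif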

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
        if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                return pmd;
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
        return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
                pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        }
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
                                _SEGMENT_ENTRY_SOFT_DIRTY;
                if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
                        pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
        }
        return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
        pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
        pud_val(pud) |= _REGION_ENTRY_PROTECT;
        return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
        pud_val(pud) |= _REGION3_ENTRY_WRITE;
        if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
                return pud;
        pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
        return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
        if (pud_large(pud)) {
                pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
                pud_val(pud) |= _REGION_ENTRY_PROTECT;
        }
        return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
        if (pud_large(pud)) {
                pud_val(pud) |= _REGION3_ENTRY_DIRTY |
                                _REGION3_ENTRY_SOFT_DIRTY;
                if (pud_val(pud) & _REGION3_ENTRY_WRITE)
                        pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
        }
        return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
        /*
         * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
         * (see __Pxxx / __Sxxx). Convert to segment table entry format.
         */
        if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
                return pgprot_val(SEGMENT_NONE);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
                return pgprot_val(SEGMENT_RO);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
                return pgprot_val(SEGMENT_RX);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
                return pgprot_val(SEGMENT_RW);
        return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
                if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
                        pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
        }
        return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
                pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
        }
        return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
                        _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
                        _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
                pmd_val(pmd) |= massage_pgprot_pmd(newprot);
                if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
                if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
                return pmd;
        }
        pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
        pmd_val(pmd) |= massage_pgprot_pmd(newprot);
        return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
        pmd_t __pmd;

        pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
        return __pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
        csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
            pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL     0
#define IDTE_LOCAL      1

#define IDTE_PTOA       0x0800
#define IDTE_NODAT      0x1000
#define IDTE_GUEST_ASCE 0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
                               unsigned long opt, unsigned long asce,
                               int local)
{
        unsigned long sto;

        sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
        if (__builtin_constant_p(opt) && opt == 0) {
                /* flush without guest asce */
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
                        : "+m" (*pmdp)
                        : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
                          [m4] "i" (local)
                        : "cc" );
        } else {
                /* flush with guest asce */
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
                        : "+m" (*pmdp)
                        : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
                          [r3] "a" (asce), [m4] "i" (local)
                        : "cc" );
        }
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
                               unsigned long opt, unsigned long asce,
                               int local)
{
        unsigned long r3o;

        r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
        r3o |= _ASCE_TYPE_REGION3;
        if (__builtin_constant_p(opt) && opt == 0) {
                /* flush without guest asce */
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
                        : "+m" (*pudp)
                        : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
                          [m4] "i" (local)
                        : "cc");
        } else {
                /* flush with guest asce */
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
                        : "+m" (*pudp)
                        : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
                          [r3] "a" (asce), [m4] "i" (local)
                        : "cc" );
        }
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long addr, pmd_t *pmdp,
                                        pmd_t entry, int dirty)
{
        VM_BUG_ON(addr & ~HPAGE_MASK);

        entry = pmd_mkyoung(entry);
        if (dirty)
                entry = pmd_mkdirty(entry);
        if (pmd_val(*pmdp) == pmd_val(entry))
                return 0;
        pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
        return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
        return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long addr, pmd_t *pmdp)
{
        VM_BUG_ON(addr & ~HPAGE_MASK);
        return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t entry)
{
        if (!MACHINE_HAS_NX)
                pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
        *pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
        pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pmd_t *pmdp)
{
        return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
                                                 unsigned long addr,
                                                 pmd_t *pmdp, int full)
{
        if (full) {
                pmd_t pmd = *pmdp;

                *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
                return pmd;
        }
        return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pmd_t *pmdp)
{
        return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
                                    unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

        return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        if (pmd_write(pmd))
                pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pmd_t *pmdp)
{
        return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)    mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)    pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
        return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |                          offset                    |01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK       ((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT      12
#define __SWP_TYPE_MASK         ((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT        2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;

        pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
        pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
        pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
        return pte;
}
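
/*
 * Hedged worked example: type 3, offset 0x1234 yields
 * 0x600 | (0x1234 << 12) | (3 << 2) == 0x123460c, which satisfies the
 * swap bit pattern (pte & 0x201) == 0x200 documented above:
 */
#if 0
pte_t swp = mk_swap_pte(3, 0x1234);     /* pte_val(swp) == 0x123460cUL */
#endif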

static inline unsigned long __swp_type(swp_entry_t entry)
{
        return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
        return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get_unmapped_area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */