#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>
/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT		18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT		16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT		14
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
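
/*
 * Illustrative values only (not definitions used by this header): with the
 * default 4K configuration PAGE_SHIFT is 12, so
 *
 *	PAGE_SIZE == ASM_CONST(1) << 12 == 0x1000	(4 KiB)
 *
 * while CONFIG_PPC_64K_PAGES gives 1 << 16 == 0x10000 (64 KiB) and
 * CONFIG_PPC_256K_PAGES gives 1 << 18 == 0x40000 (256 KiB).
 */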
#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
extern bool hugetlb_disabled;
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif
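
/*
 * Worked example (numbers are illustrative; on hash MMUs HPAGE_SHIFT is
 * only known at runtime): with 16M hugepages HPAGE_SHIFT is 24, so on a
 * 4K base page configuration
 *
 *	HPAGE_SIZE         == 1UL << 24 == 0x1000000
 *	HUGETLB_PAGE_ORDER == 24 - 12   == 12
 *
 * i.e. one hugepage covers 2^12 == 4096 base pages.
 */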
/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
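
/*
 * Example of the sign extension relied on above (illustrative only): with
 * 4K pages (1 << PAGE_SHIFT) is the int 0x1000, so PAGE_MASK is the int
 * 0xfffff000, and widening it keeps the high bits set:
 *
 *	u64 mask = PAGE_MASK;				// 0xfffffffffffff000
 *	u64 base = 0xc000000000001234ULL & mask;	// 0xc000000000001000
 *
 * An unsigned 32-bit constant would zero-extend instead and clear the
 * upper half of the address.
 */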
/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to determine the virtual address for a physical one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
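
/*
 * Worked example of the relations above (numbers are illustrative): a
 * typical static ppc32 configuration has
 *
 *	KERNELBASE == PAGE_OFFSET == 0xc0000000
 *	PHYSICAL_START == MEMORY_START == 0x00000000
 *
 * so KERNELBASE - PAGE_OFFSET == PHYSICAL_START - MEMORY_START == 0 and
 *
 *	va == pa + PAGE_OFFSET - MEMORY_START
 *	   == 0x00100000 + 0xc0000000 - 0 == 0xc0100000	for pa == 1MB
 */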
#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See Description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif
#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#ifndef __ASSEMBLY__
extern unsigned long max_mapnr;
static inline bool pfn_valid(unsigned long pfn)
{
	unsigned long min_pfn = ARCH_PFN_OFFSET;

	return pfn >= min_pfn && pfn < max_mapnr;
}
#endif
#endif

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
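
/*
 * Illustrative round trip (assumes 4K pages and a ppc32 layout with
 * PAGE_OFFSET == 0xc0000000 and MEMORY_START == 0):
 *
 *	virt_to_pfn(0xc0100000) == __pa(0xc0100000) >> 12 == 0x100
 *	pfn_to_kaddr(0x100)     == __va(0x100 << 12)      == 0xc0100000
 *
 * virt_to_page() then maps the pfn to its struct page via the memory model
 * selected by <asm-generic/memory_model.h>, included at the end of this file.
 */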
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * On hash the vmalloc and other regions alias to the kernel region when passed
 * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
 * return true for some vmalloc addresses, which is incorrect. So explicitly
 * check that the address is in the kernel region.
 */
#define virt_addr_valid(kaddr)	(REGION_ID(kaddr) == KERNEL_REGION_ID && \
				 pfn_valid(virt_to_pfn(kaddr)))
#else
#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
#endif
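
/*
 * Illustrative consequence of the region check above: a vmalloc address is
 * outside the kernel linear-map region on hash, so even if aliasing makes
 * pfn_valid(virt_to_pfn(vaddr)) true,
 *
 *	virt_addr_valid(vaddr) == false
 *
 * because REGION_ID(vaddr) != KERNEL_REGION_ID.
 */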
/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 * With RELOCATABLE && PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
 *				MODULO(_stext.run,256M)
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * Where:
 *	PHYSICAL_START = kernstart_addr = Physical address of _stext
 *	KERNELBASE = Compiled virtual address of _stext.
 *
 * This formula holds true only if the kernel load address is TLB page aligned.
 *
 * In our case, we also need to account for the shift in the kernel virtual
 * address.
 *
 * E.g.,
 *
 * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
 * In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		 = 0xbc100000, which is wrong.
 *
 * Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * Where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base
 *			     = ALIGN_DOWN(KERNELBASE,256M) +
 *				MODULO(PHYSICAL_START,256M)
 *
 * To make the cost of __va() / __pa() more lightweight, we introduce
 * a new variable virt_phys_offset, which will hold:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE,256M) -
 *				ALIGN_DOWN(PHYSICAL_START,256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
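
/*
 * Worked example of the 64-bit OR/AND form above (illustrative, assuming the
 * usual PAGE_OFFSET of 0xc000000000000000):
 *
 *	__va(0x0000000001234000) == 0x0000000001234000 | PAGE_OFFSET
 *				 == 0xc000000001234000
 *	__pa(0xc000000001234000) == 0xc000000001234000 & 0x0fffffffffffffffUL
 *				 == 0x0000000001234000
 *
 * This is equivalent to adding/subtracting PAGE_OFFSET only because linear-map
 * addresses never have those top bits set in their physical part.
 */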
/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
				 VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif
/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)	__ALIGN_KERNEL(addr, size)
#define _ALIGN_DOWN(addr, size)	((addr)&(~((typeof(addr))(size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
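
/*
 * Illustrative results (size must be a power of two for the mask arithmetic
 * to hold): with a 4K boundary,
 *
 *	_ALIGN_UP(0xc0001234, 0x1000)   == 0xc0002000
 *	_ALIGN_DOWN(0xc0001234, 0x1000) == 0xc0001000
 *	_ALIGN(0xc0001000, 0x1000)      == 0xc0001000	(already aligned)
 */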
/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif
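
/*
 * Usage sketch (hypothetical caller, shown only to illustrate the advice
 * above):
 *
 *	if (is_kernel_addr(addr))
 *		;	// kernel portion of the address space
 *	else
 *		;	// treat as a user address
 *
 * rather than open-coding a comparison against PAGE_OFFSET or KERNELBASE.
 */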
#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages. This works because we know that
 * the page tables live in kernel space. If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size. This masks those bits.
 */
#define HUGEPD_SHIFT_MASK	0x3f
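
/*
 * Decoding sketch (illustrative only, not the helpers the kernel provides
 * elsewhere): for a non-Book3S-64 directory entry "pd" that points at a
 * hugepte table,
 *
 *	is_huge = (pd & PD_HUGE) != 0;
 *	shift   = pd & HUGEPD_SHIFT_MASK;	// log2 of the huge page size
 *
 * with the remaining bits holding the kernel virtual address of the table.
 */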
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep)		(0)
#define pgd_huge(pgd)		(0)
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif
struct vm_area_struct;

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * For Book3S 64 with 4K and 64K Linux page sizes we want to use
 * pointers, because the page table actually stores the pfn.
 */
typedef pte_t *pgtable_t;
#else
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
typedef pte_t *pgtable_t;
#else
typedef struct page *pgtable_t;
#endif
#endif

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

#endif /* _ASM_POWERPC_PAGE_H */