init_64.c

/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"
#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */
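/*
 * memstart_addr is the physical address of the start of system memory;
 * kernstart_addr is the physical address the kernel was loaded at
 * (non-zero when a relocatable kernel runs somewhere other than zero).
 */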
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;
        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}
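/*
 * vmemmap_list records every virt -> phys block backing the virtual
 * memmap, so memory hotplug can tear mappings down again and
 * realmode_pfn_to_page() can translate without the MMU on.
 * Descriptors are carved out of whole pages by vmemmap_list_alloc()
 * and recycled through a free list ("next"/"num_freed") on removal.
 */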
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;
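/* Hand out one vmemmap_backing descriptor, preferring recycled entries. */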
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;

        /* get from freed entries first */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;
                return vmem_back;
        }

        /* allocate a page when required and hand out chunks */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}
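/* Record a newly created vmemmap block at the head of vmemmap_list. */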
static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}
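/*
 * Populate the virtual memmap for [start, end) in blocks of the
 * configured vmemmap page size, skipping any block that an
 * overlapping, already-initialised section has mapped.
 */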
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p;
                int rc;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug(" * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                rc = vmemmap_create_mapping(start, page_size, __pa(p));
                if (rc < 0) {
                        pr_warning(
                                "vmemmap_populate: Unable to create vmemmap mapping: %d\n",
                                rc);
                        return -EFAULT;
                }
        }

        return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
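/*
 * Unlink the vmemmap_list entry for @start and recycle it onto the
 * allocator's free list.  Returns the physical address of the backing
 * block, or 0 if no entry was found.
 */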
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* look for it with prev pointer recorded */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* remove it from vmemmap_list */
        if (vmem_back == vmemmap_list) /* remove head */
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;

        /* make the free list head point at this freed entry */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}
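/*
 * Tear down the virtual memmap for [start, end), freeing each backing
 * page unless another still-valid section shares its vmemmap page.
 */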
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long addr;

                /*
                 * The section has already been marked as invalid, so if
                 * vmemmap_populated() returns true some other section
                 * still lives in this page; skip it.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (addr) {
                        struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

                        if (PageReserved(page)) {
                                /* allocated from bootmem */
                                if (page_size < PAGE_SIZE) {
                                        /*
                                         * This shouldn't happen, but if it
                                         * does, leave the memory there.
                                         */
                                        WARN_ON_ONCE(1);
                                } else {
                                        unsigned int nr_pages =
                                                1 << get_order(page_size);
                                        while (nr_pages--)
                                                free_reserved_page(page++);
                                }
                        } else
                                free_pages((unsigned long)(__va(addr)),
                                           get_order(page_size));

                        vmemmap_remove_mapping(start, page_size);
                }
        }
}
#endif
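/*
 * Required by the generic memory-hotplug code; powerpc needs no extra
 * bookkeeping here, so this is deliberately a no-op.
 */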
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}
/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks, so
 * get_page/put_page may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct vmemmap_backing *vmem_back;
        struct page *page;
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

        for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
                if (pg_va < vmem_back->virt_addr)
                        continue;

                /* Entries may have been freed from vmemmap_list, so check all */
                if ((pg_va + sizeof(struct page)) <=
                                (vmem_back->virt_addr + page_size)) {
                        page = (struct page *) (vmem_back->phys + pg_va -
                                        vmem_back->virt_addr);
                        return page;
                }
        }

        /* The page struct is probably split between real pages */
        return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);
        return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
#ifdef CONFIG_PPC_STD_MMU_64
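/* "disable_radix" on the kernel command line forces the hash MMU. */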
static bool disable_radix;
static int __init parse_disable_radix(char *p)
{
        disable_radix = true;
        return 0;
}
early_param("disable_radix", parse_disable_radix);
/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void early_check_vec5(void)
{
        unsigned long root, chosen;
        int size;
        const u8 *vec5;
        u8 mmu_supported;

        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
        if (chosen == -FDT_ERR_NOTFOUND) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }
        vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
        if (!vec5) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }
        if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }

        /* Check for supported configuration */
        mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
                        OV5_FEAT(OV5_MMU_SUPPORT);
        if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
                /* Hypervisor only supports radix - check enabled && GTSE */
                if (!early_radix_enabled()) {
                        pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
                }
                if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
                      OV5_FEAT(OV5_RADIX_GTSE))) {
                        pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
                }
                /* Do radix anyway - the hypervisor said we had to */
                cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
        } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
                /* Hypervisor only supports hash - disable radix */
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
        }
}
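/*
 * Pick hash vs. radix early in boot: honour disable_radix, consult the
 * hypervisor via vec5 when running as a guest, then hand off to the
 * chosen MMU's device-tree init.
 */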
void __init mmu_early_init_devtree(void)
{
        /* Disable radix mode based on kernel command line. */
        /* We don't yet have the machinery to do radix as a guest. */
        if (disable_radix || !(mfmsr() & MSR_HV))
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

        /*
         * Check /chosen/ibm,architecture-vec-5 if running as a guest.
         * When running bare-metal, we can use radix if we like
         * even though the ibm,architecture-vec-5 property created by
         * skiboot doesn't have the necessary bits set.
         */
        if (!(mfmsr() & MSR_HV))
                early_check_vec5();

        if (early_radix_enabled())
                radix__early_init_devtree();
        else
                hash__early_init_devtree();
}
#endif /* CONFIG_PPC_STD_MMU_64 */