init_64.c

/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
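
/*
 * Editorial note: memstart_addr starts out as an all-ones "not yet known"
 * sentinel; both values are expected to be filled in during early boot
 * (an inference from the ~0 initialiser, not stated in this file).
 */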
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
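
/*
 * Worked example for the helper above (the concrete values are editorial
 * assumptions, not taken from this file): with sizeof(struct page) == 64
 * and PAGES_PER_SECTION == 0x10000, an address 100 struct pages past
 * vmemmap yields offset / 64 == 100, and PAGE_SECTION_MASK then rounds
 * that down to pfn 0, i.e. the first pfn of the first section.
 */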

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;
        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}
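
/*
 * Note on the loop above: PAGES_PER_SECTION * sizeof(struct page) is the
 * number of vmemmap bytes that describe one sparsemem section, so the
 * scan probes the first struct page of every section overlapping the
 * [start, start + page_size) vmemmap range.
 */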

/*
 * vmemmap virtual address space management does not have a traditional
 * page table to track which virtual struct pages are backed by a physical
 * mapping.  The virtual to physical mappings are tracked in a simple
 * linked list format.  'vmemmap_list' maintains the entire vmemmap
 * physical mapping at all times, whereas 'next' maintains the available
 * vmemmap_backing structures which have been deleted from 'vmemmap_list'
 * during system runtime (by a memory hotplug remove operation).  The
 * freed 'vmemmap_backing' structures are reused later when new requests
 * come in, without allocating fresh memory.  This pointer also tracks the
 * allocated 'vmemmap_backing' structures, as we allocate one full page of
 * memory at a time when we don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot, and again tracks the freed nodes during runtime.
 * This is racy, but the race never materialises because the two uses are
 * separated by the boot process.  It would become a problem if we somehow
 * had a memory hotplug operation during boot!
 */
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;

        /* get from freed entries first */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;

                return vmem_back;
        }

        /* allocate a page when required and hand out chunks */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}
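
/*
 * Editorial note: backing pages obtained above are never handed back to
 * the page allocator; entries released by vmemmap_list_free() below are
 * simply strung onto the freed chain for reuse.
 */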

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}
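
/*
 * New entries are pushed at the head, so vmemmap_list runs from the most
 * recently populated range back to the oldest.
 */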

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p;
                int rc;

                if (vmemmap_populated(start, page_size))
                        continue;

                if (altmap)
                        p = altmap_alloc_block_buf(page_size, altmap);
                else
                        p = vmemmap_alloc_block_buf(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                rc = vmemmap_create_mapping(start, page_size, __pa(p));
                if (rc < 0) {
                        pr_warn("%s: Unable to create vmemmap mapping: %d\n",
                                __func__, rc);
                        return -EFAULT;
                }
        }

        return 0;
}
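
/*
 * Sizing note (editorial; the exact numbers depend on the MMU
 * configuration and are assumptions here): mmu_vmemmap_psize is the
 * linear-mapping page size chosen for the vmemmap at MMU init, commonly
 * 16MB under the hash MMU and 2MB under radix, so each loop iteration
 * above maps one such block of struct pages.
 */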

#ifdef CONFIG_MEMORY_HOTPLUG
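/*
 * Unlink the vmemmap_list entry for 'start' and return the physical
 * address it recorded, or 0 (with a warning) if no entry matches; the
 * unlinked entry is parked on the freed chain for vmemmap_list_alloc()
 * to reuse (summary added in editing).
 */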
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* look for it with prev pointer recorded */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* remove it from vmemmap_list */
        if (vmem_back == vmemmap_list) /* remove head */
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;

        /* next point to this freed entry */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long page_order = get_order(page_size);

        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long nr_pages, addr;
                struct page *section_base;
                struct page *page;

                /*
                 * The section has already been marked as invalid, so
                 * vmemmap_populated() returning true means some other
                 * sections are still in this page; skip it.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (!addr)
                        continue;

                page = pfn_to_page(addr >> PAGE_SHIFT);
                section_base = pfn_to_page(vmemmap_section_start(start));
                nr_pages = 1 << page_order;

                if (altmap) {
                        vmem_altmap_free(altmap, nr_pages);
                } else if (PageReserved(page)) {
                        /* allocated from bootmem */
                        if (page_size < PAGE_SIZE) {
                                /*
                                 * This shouldn't happen, but if it is
                                 * the case, leave the memory there.
                                 */
                                WARN_ON_ONCE(1);
                        } else {
                                while (nr_pages--)
                                        free_reserved_page(page++);
                        }
                } else {
                        free_pages((unsigned long)(__va(addr)), page_order);
                }

                vmemmap_remove_mapping(start, page_size);
        }
}
#endif
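
/* No-op stub for the generic bootmem-info registration hook. */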
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in the real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct vmemmap_backing *vmem_back;
        struct page *page;
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

        for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
                if (pg_va < vmem_back->virt_addr)
                        continue;

                /* Entries may have been freed from vmemmap_list, so check all */
                if ((pg_va + sizeof(struct page)) <=
                                (vmem_back->virt_addr + page_size)) {
                        page = (struct page *) (vmem_back->phys + pg_va -
                                        vmem_back->virt_addr);
                        return page;
                }
        }

        /* This page struct is probably split between real pages */
        return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
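
/*
 * Usage sketch (illustrative; the surrounding caller and the H_TOO_HARD
 * punt are assumptions about typical real-mode callers, not taken from
 * this file):
 *
 *      struct page *page = realmode_pfn_to_page(pfn);
 *      if (!page)
 *              return H_TOO_HARD;      // fall back to virtual mode
 */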

#else

struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);
        return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

static int __init parse_disable_radix(char *p)
{
        bool val;

        if (strlen(p) == 0)
                val = true;
        else if (kstrtobool(p, &val))
                return -EINVAL;

        disable_radix = val;

        return 0;
}
early_param("disable_radix", parse_disable_radix);
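
/*
 * Accepted command-line forms (a sketch added in editing; kstrtobool
 * parses y/Y/1, n/N/0 and on/off spellings):
 *
 *      disable_radix           -> radix disabled (empty value means true)
 *      disable_radix=1         -> radix disabled
 *      disable_radix=0         -> radix stays enabled
 */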

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
        unsigned long root, chosen;
        int size;
        const u8 *vec5;
        u8 mmu_supported;

        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
        if (chosen == -FDT_ERR_NOTFOUND) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }
        vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
        if (!vec5) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }
        if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }

        /* Check for supported configuration */
        mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
                        OV5_FEAT(OV5_MMU_SUPPORT);
        if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
                /* Hypervisor only supports radix - check enabled && GTSE */
                if (!early_radix_enabled()) {
                        pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
                }
                if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
                                        OV5_FEAT(OV5_RADIX_GTSE))) {
                        pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
                }
                /* Do radix anyway - the hypervisor said we had to */
                cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
        } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
                /* Hypervisor only supports hash - disable radix */
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
        }
}
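
/*
 * Net effect of the check above (summary added in editing): a radix-only
 * hypervisor forces radix on even if disable_radix was given, a hash-only
 * hypervisor forces radix off, and any other OV5 value leaves the feature
 * bits untouched.
 */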

void __init mmu_early_init_devtree(void)
{
        /* Disable radix mode based on kernel command line. */
        if (disable_radix)
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

        /*
         * Check /chosen/ibm,architecture-vec-5 if running as a guest.
         * When running bare-metal, we can use radix if we like
         * even though the ibm,architecture-vec-5 property created by
         * skiboot doesn't have the necessary bits set.
         */
        if (!(mfmsr() & MSR_HV))
                early_check_vec5();

        if (early_radix_enabled())
                radix__early_init_devtree();
        else
                hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */