/*
 *  linux/arch/x86/mm/init_32.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/init.h>

#include "mm_internal.h"
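/*
 * highstart_pfn/highend_pfn bound the highmem range, and
 * __vmalloc_start_set flags that the start of the vmalloc area is
 * known; all three are filled in by initmem_init() below.
 */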
unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
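/*
 * Make sure the paging hierarchy covering @vaddr exists in
 * swapper_pg_dir and hand back a pointer to the corresponding
 * pmd/pte entry, for early callers that need a mapping slot
 * before the core MM is fully up.
 */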
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);

        return one_page_table_init(pmd) + pte_idx;
}
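/*
 * Count how many pte pages page_table_range_init() will need to
 * re-allocate for the kmap range (see page_table_kmap_check()
 * below), so that the caller can grab them with one contiguous
 * alloc_low_pages() call.
 */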
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
        unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
                return 0;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd_idx++) {
                        if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
                            (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
                                count++;
                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
#endif
        return count;
}
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte,
                                           void **adr)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = *adr;
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);
                *adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;
        unsigned long count = page_table_range_init_count(start, end);
        void *adr = NULL;

        if (count)
                adr = alloc_low_pages(count);

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte, &adr);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
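/*
 * Note that this range deliberately runs from _text all the way to
 * __init_end, so init code keeps execute permission until it is
 * freed.
 */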
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
         * the early code in head_32.S
         *
         * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
         * as desired for the kernel identity mapping.
         *
         * This two pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!boot_cpu_has(X86_FEATURE_PSE))
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                pfn &= PMD_MASK >> PAGE_SHIFT;
                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                        last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
                                } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return last_map_addr;
}
pte_t *kmap_pte;
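/*
 * Walk init_mm's page tables down to the pte that maps a fixmap
 * address; the p4d and pud levels are folded on 32-bit, so those
 * offsets are effectively no-ops.
 */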
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        pgd_t *pgd = pgd_offset_k(vaddr);
        p4d_t *p4d = p4d_offset(pgd, vaddr);
        pud_t *pud = pud_offset(p4d, vaddr);
        pmd_t *pmd = pmd_offset(pud, vaddr);

        return pte_offset_kernel(pmd, vaddr);
}
static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        p4d = p4d_offset(pgd, vaddr);
        pud = pud_offset(p4d, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

void __init add_highpages_with_active_regions(int nid,
                unsigned long start_pfn, unsigned long end_pfn)
{
        phys_addr_t start, end;
        u64 i;

        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
                unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
                                            start_pfn, end_pfn);
                unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
                                              start_pfn, end_pfn);
                for ( ; pfn < e_pfn; pfn++)
                        if (pfn_valid(pfn))
                                free_highmem_page(pfn_to_page(pfn));
        }
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */
void __init native_pagetable_init(void)
{
        unsigned long pfn, va;
        pgd_t *pgd, *base = swapper_pg_dir;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table.
         * In virtual address space, we should have at least two pages
         * from VMALLOC_END to pkmap or fixmap according to the
         * VMALLOC_END definition, and max_low_pfn is set to the
         * VMALLOC_END physical address. If the initial memory mapping
         * did its job right, we should find ptes in use near
         * max_low_pfn, or a pmd that is not present.
         */
        for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                p4d = p4d_offset(pgd, va);
                pud = pud_offset(p4d, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                /* should not be large page here */
                if (pmd_large(*pmd)) {
                        pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
                                pfn, pmd, __pa(pmd));
                        BUG_ON(1);
                }

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
                       pfn, pmd, __pa(pmd), pte, __pa(pte));
                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
        paging_init();
}
/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}
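/*
 * By this point the kernel linear mapping has already been built by
 * the shared code in arch/x86/mm/init.c, so all that is left here is
 * to wire up the permanent kmap entries.
 */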
static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);
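/*
 * Example: booting with "highmem=512M" on the kernel command line
 * forces exactly 512 MB of highmem (memparse() accepts the usual
 * K/M/G suffixes).
 */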
#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                       pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                               pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}
#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"

/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                       pages_to_mb(max_pfn - MAXMEM_PFN),
                       pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}
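/*
 * Worked example (with the usual 3G/1G split, MAXMEM_PFN covers
 * roughly 896 MB of lowmem): on a 2 GB machine booted without
 * highmem=, highmem_pages defaults to max_pfn - MAXMEM_PFN, so
 * ~896 MB stays lowmem and the remaining ~1152 MB becomes highmem.
 */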
/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */
        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
        sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
        max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
               pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
void __init setup_bootmem_allocator(void)
{
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
               max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        olpc_dt_build_devicetree();
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        zone_sizes_init();
}
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        char z = 0;

        printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

        __set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

        if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
                clear_fixmap(FIX_WP_TEST);
                printk(KERN_CONT "Ok.\n");
                return;
        }

        printk(KERN_CONT "No.\n");
        panic("Linux doesn't support CPUs with broken WP.");
}
void __init mem_init(void)
{
        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /*
         * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages has to
         * be done before free_all_bootmem(). Memblock uses free low memory
         * for temporary data (see find_range_array()) and for this purpose
         * can use pages that were already passed to the buddy allocator,
         * hence marked as not accessible in the page tables when compiled
         * with CONFIG_DEBUG_PAGEALLOC. Otherwise the order of initialization
         * is not important here.
         */
        set_highmem_pages_init();

        /* this will put all low memory onto the freelists */
        free_all_bootmem();

        after_bootmem = 1;

        mem_init_print_info(NULL);
        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,
                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,
                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        test_wp_bit();
}
#ifdef CONFIG_MEMORY_HOTPLUG
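/*
 * Hot-add @size bytes of memory starting at physical address @start
 * to node @nid. On 32-bit the new range normally ends up in
 * ZONE_HIGHMEM (zone_for_memory() picks the zone), since lowmem is
 * fixed at boot.
 */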
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones +
                zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;

        zone = page_zone(pfn_to_page(start_pfn));
        return __remove_pages(zone, start_pfn, nr_pages);
}
#endif
#endif
int kernel_set_to_readonly __read_mostly;
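/*
 * set_kernel_text_rw()/set_kernel_text_ro() temporarily flip the
 * protection of the kernel text once mark_rodata_ro() has made it
 * read-only, so that callers such as the text-patching code can
 * modify it and then seal it again.
 */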
void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);

        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}
static void mark_nxdata_nx(void)
{
        /*
         * When this is called, init has already been executed and released,
         * so everything past _etext should be NX.
         */
        unsigned long start = PFN_ALIGN(_etext);
        /*
         * The upper bound comes from the is_kernel_text() limit, rounded up
         * by HPAGE_SIZE since huge pages may have been used for the mapping:
         */
        unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

        if (__supported_pte_mask & _PAGE_NX)
                printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
        set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
               size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
               start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        mark_nxdata_nx();
        if (__supported_pte_mask & _PAGE_NX)
                debug_checkwx();
}