/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

#include "migrate.h"

#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))

#ifndef __tilegx__
unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
EXPORT_SYMBOL(VMALLOC_RESERVE);
#endif

/* Create an L2 page table */
static pte_t * __init alloc_pte(void)
{
        return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

/*
 * L2 page tables per controller.  We allocate these all at once from
 * the bootmem allocator and store them here.  This saves on kernel L2
 * page table memory, compared to allocating a full 64K page per L2
 * page table, and also means that in cases where we use huge pages,
 * we are guaranteed to later be able to shatter those huge pages and
 * switch to using these page tables instead, without requiring
 * further allocation.  Each l2_ptes[] entry points to the first page
 * table for the first hugepage-size piece of memory on the
 * controller; other page tables are just indexed directly, i.e. the
 * L2 page tables are contiguous in memory for each controller.
 */
static pte_t *l2_ptes[MAX_NUMNODES];
static int num_l2_ptes[MAX_NUMNODES];

static void init_prealloc_ptes(int node, int pages)
{
        BUG_ON(pages & (PTRS_PER_PTE - 1));
        if (pages) {
                num_l2_ptes[node] = pages;
                l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
                                                HV_PAGE_TABLE_ALIGN, 0);
        }
}

pte_t *get_prealloc_pte(unsigned long pfn)
{
        int node = pfn_to_nid(pfn);
        pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
        BUG_ON(node >= MAX_NUMNODES);
        BUG_ON(pfn >= num_l2_ptes[node]);
        return &l2_ptes[node][pfn];
}
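
/*
 * Illustrative sketch (added commentary, not part of the build):
 * because the per-node L2 tables are contiguous, looking up the PTE
 * for a PFN is pure arithmetic.  For a node whose memory starts at
 * the controller base, get_prealloc_pte() reduces to something like
 *
 *      pte_t *pte = &l2_ptes[node][pfn - node_first_pfn];
 *
 * where node_first_pfn is a hypothetical name for the first PFN on
 * that controller; the real code above derives the offset by masking
 * off the controller-index bits at and above NR_PA_HIGHBIT_SHIFT.
 */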

/*
 * What caching do we expect pages from the heap to have when
 * they are allocated during bootup?  (Once we've installed the
 * "real" swapper_pg_dir.)
 */
static int initial_heap_home(void)
{
        if (hash_default)
                return PAGE_HOME_HASH;
        return smp_processor_id();
}

/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
        phys_addr_t pa = __pa(page_table);
        unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
        pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
        BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
        pteval = pte_set_home(pteval, initial_heap_home());
        *(pte_t *)pmd = pteval;
        if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
                BUG();
}
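
/*
 * Added commentary: the hypervisor addresses page tables by "page
 * table frame number" (PTFN), i.e. in units of HV_PAGE_TABLE_ALIGN
 * rather than PAGE_SIZE, which is why the shift above uses
 * HV_LOG2_PAGE_TABLE_ALIGN.  For example, if the alignment were 16KB,
 * a table at PA 0x40000 would be PTFN 0x40000 >> 14 == 0x10.  The
 * actual alignment is whatever <hv/hypervisor.h> defines for this
 * chip; only the arithmetic is assumed here.
 */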

#ifdef __tilegx__

static inline pmd_t *alloc_pmd(void)
{
        return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
{
        assign_pte((pmd_t *)pud, (pte_t *)pmd);
}

#endif /* __tilegx__ */

/* Replace the given pmd with a full PTE table. */
void __init shatter_pmd(pmd_t *pmd)
{
        pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
        assign_pte(pmd, pte);
}

#ifdef __tilegx__
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
        pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
        if (pud_none(*pud))
                assign_pmd(pud, alloc_pmd());
        return pmd_offset(pud, va);
}
#else
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
        return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
}
#endif

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
                                         unsigned long end, pgd_t *pgd)
{
        unsigned long vaddr;
        start = round_down(start, PMD_SIZE);
        end = round_up(end, PMD_SIZE);
        for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) {
                pmd_t *pmd = get_pmd(pgd, vaddr);
                if (pmd_none(*pmd))
                        assign_pte(pmd, alloc_pte());
        }
}
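
/*
 * Illustrative usage (added commentary): paging_init() below calls
 * this to make sure L2 tables exist for the fixmap region, e.g.
 *
 *      page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
 *                            FIXADDR_TOP, pgd_base);
 *
 * Because start and end are rounded out to PMD_SIZE, any range that
 * touches a huge-page-sized slot at all gets a full L2 table for
 * that slot.
 */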

static int __initdata ktext_hash = 1;  /* .text pages */
static int __initdata kdata_hash = 1;  /* .data and .bss pages */
int __write_once hash_default = 1;     /* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
int __write_once kstack_hash = 1;      /* if no homecaching, use h4h */

/*
 * CPUs to use for striping the pages of kernel data.  If hash-for-home
 * is available, this is only relevant if kcache_hash sets up the
 * .data and .bss to be page-homed, and we don't want the default mode
 * of using the full set of kernel cpus for the striping.
 */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;

int __write_once kdata_huge;  /* if no homecaching, small pages */

/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
        prot = pte_set_home(prot, home);
        if (home == PAGE_HOME_IMMUTABLE) {
                if (ktext_hash)
                        prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
                else
                        prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
        }
        return prot;
}

/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
        int cpu;
        unsigned long page;
        enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };

        /* For kdata=huge, everything is just hash-for-home. */
        if (kdata_huge)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

        /* We map the aliased pages of permanent text inaccessible. */
        if (address < (ulong) _sinittext - CODE_DELTA)
                return PAGE_NONE;

        /* We map read-only data non-coherent for performance. */
        if ((address >= (ulong) __start_rodata &&
             address < (ulong) __end_rodata) ||
            address == (ulong) empty_zero_page) {
                return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
        }

#ifndef __tilegx__
        /* Force the atomic_locks[] array page to be hash-for-home. */
        if (address == (ulong) atomic_locks)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

        /*
         * Everything else that isn't data or bss is heap, so mark it
         * with the initial heap home (hash-for-home, or this cpu).  This
         * includes any addresses after the loaded image and any address
         * before __init_end, since we already captured the case of text
         * before _sinittext, and __pa(_einittext) is approximately
         * __pa(__init_begin).
         *
         * All the LOWMEM pages that we mark this way will get their
         * struct page homecache properly marked later, in set_page_homes().
         * The HIGHMEM pages we leave with a default zero for their
         * homes, but with a zero free_time we don't have to actually
         * do a flush action the first time we use them, either.
         */
        if (address >= (ulong) _end || address < (ulong) __init_end)
                return construct_pgprot(PAGE_KERNEL, initial_heap_home());

        /* Use hash-for-home if requested for data/bss. */
        if (kdata_hash)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

        /*
         * Otherwise we just hand out consecutive cpus.  To avoid
         * requiring this function to hold state, we just walk forward from
         * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to
         * reach the requested address, while walking cpu home around
         * kdata_mask.  This is typically no more than a dozen or so
         * iterations.
         */
        page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
        BUG_ON(address < page || address >= (ulong)_end);
        cpu = cpumask_first(&kdata_mask);
        for (; page < address; page += PAGE_SIZE) {
                if (page >= (ulong)&init_thread_union &&
                    page < (ulong)&init_thread_union + THREAD_SIZE)
                        continue;
                if (page == (ulong)empty_zero_page)
                        continue;
#ifndef __tilegx__
                if (page == (ulong)atomic_locks)
                        continue;
#endif
                cpu = cpumask_next(cpu, &kdata_mask);
                if (cpu == NR_CPUS)
                        cpu = cpumask_first(&kdata_mask);
        }

        return construct_pgprot(PAGE_KERNEL, cpu);
}
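
/*
 * Worked example (added commentary, hypothetical numbers): with
 * kdata_mask = {4,5,6} and three data pages above __end_rodata, the
 * walk above homes them on cpu 4, 5 and 6 respectively, wrapping back
 * to 4 for a fourth page.  Pages inside init_thread_union, the
 * empty_zero_page, and (on tilepro) atomic_locks do not advance the
 * cursor, so they don't perturb the striping of their neighbors.
 */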

/*
 * This function sets up how we cache the kernel text.  If we have
 * hash-for-home support, normally that is used instead (see the
 * kcache_hash boot flag for more information).  But if we end up
 * using a page-based caching technique, this option sets up the
 * details of that.  In addition, the "ktext=nocache" option may
 * always be used to disable local caching of text pages, if desired.
 */

static int __initdata ktext_arg_seen;
static int __initdata ktext_small;
static int __initdata ktext_local;
static int __initdata ktext_all;
static int __initdata ktext_nondataplane;
static int __initdata ktext_nocache;
static struct cpumask __initdata ktext_mask;

static int __init setup_ktext(char *str)
{
        if (str == NULL)
                return -EINVAL;

        /* If you have a leading "nocache", turn off ktext caching */
        if (strncmp(str, "nocache", 7) == 0) {
                ktext_nocache = 1;
                pr_info("ktext: disabling local caching of kernel text\n");
                str += 7;
                if (*str == ',')
                        ++str;
                if (*str == '\0')
                        return 0;
        }

        ktext_arg_seen = 1;

        /* Default setting: use a huge page */
        if (strcmp(str, "huge") == 0)
                pr_info("ktext: using one huge locally cached page\n");

        /* Pay TLB cost but get no cache benefit: cache small pages locally */
        else if (strcmp(str, "local") == 0) {
                ktext_small = 1;
                ktext_local = 1;
                pr_info("ktext: using small pages with local caching\n");
        }

        /* Neighborhood cache ktext pages on all cpus. */
        else if (strcmp(str, "all") == 0) {
                ktext_small = 1;
                ktext_all = 1;
                pr_info("ktext: using maximal caching neighborhood\n");
        }

        /* Neighborhood ktext pages on specified mask */
        else if (cpulist_parse(str, &ktext_mask) == 0) {
                if (cpumask_weight(&ktext_mask) > 1) {
                        ktext_small = 1;
                        pr_info("ktext: using caching neighborhood %*pbl with small pages\n",
                                cpumask_pr_args(&ktext_mask));
                } else {
                        pr_info("ktext: caching on cpu %*pbl with one huge page\n",
                                cpumask_pr_args(&ktext_mask));
                }
        }

        else if (*str)
                return -EINVAL;

        return 0;
}

early_param("ktext", setup_ktext);
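
/*
 * Example boot arguments (added commentary, values illustrative):
 * "ktext=huge" is the default single huge locally cached page;
 * "ktext=local" and "ktext=all" select small pages homed locally or
 * across all cpus; a cpulist such as "ktext=0-3" stripes the text
 * pages over cpus 0 through 3 (parsed by cpulist_parse() above).  A
 * leading "nocache" combines with any of these, e.g.
 * "ktext=nocache,local", to disable caching of the kernel text as
 * described for ktext_nocache above.
 */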

static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
        if (!ktext_nocache)
                prot = hv_pte_set_nc(prot);
        else
                prot = hv_pte_set_no_alloc_l2(prot);
        return prot;
}

/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
        __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long long irqmask;
        unsigned long address, pfn;
        pmd_t *pmd;
        pte_t *pte;
        int pte_ofs;
        const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
        struct cpumask kstripe_mask;
        int rc, i;

        if (ktext_arg_seen && ktext_hash) {
                pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n");
                ktext_small = 0;
        }

        if (kdata_arg_seen && kdata_hash) {
                pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n");
        }

        if (kdata_huge && !hash_default) {
                pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n");
                kdata_huge = 0;
        }

        /*
         * Set up a mask for cpus to use for kernel striping.
         * This is normally all cpus, but minus dataplane cpus if any.
         * If the dataplane covers the whole chip, we stripe over
         * the whole chip too.
         */
        cpumask_copy(&kstripe_mask, cpu_possible_mask);
        if (!kdata_arg_seen)
                kdata_mask = kstripe_mask;

        /* Allocate and fill in L2 page tables */
        for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
                unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
                unsigned long end_pfn = node_end_pfn[i];
#endif
                unsigned long end_huge_pfn = 0;

                /* Pre-shatter the last huge page to allow per-cpu pages. */
                if (kdata_huge)
                        end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

                pfn = node_start_pfn[i];

                /* Allocate enough memory to hold L2 page tables for node. */
                init_prealloc_ptes(i, end_pfn - pfn);

                address = (unsigned long) pfn_to_kaddr(pfn);
                while (pfn < end_pfn) {
                        BUG_ON(address & (HPAGE_SIZE-1));
                        pmd = get_pmd(pgtables, address);
                        pte = get_prealloc_pte(pfn);
                        if (pfn < end_huge_pfn) {
                                pgprot_t prot = init_pgprot(address);
                                *(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
                                     pfn++, pte_ofs++, address += PAGE_SIZE)
                                        pte[pte_ofs] = pfn_pte(pfn, prot);
                        } else {
                                if (kdata_huge)
                                        printk(KERN_DEBUG "pre-shattered huge page at %#lx\n",
                                               address);
                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
                                     pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        pgprot_t prot = init_pgprot(address);
                                        pte[pte_ofs] = pfn_pte(pfn, prot);
                                }
                                assign_pte(pmd, pte);
                        }
                }
        }

        /*
         * Set or check ktext_map now that we have cpu_possible_mask
         * and kstripe_mask to work with.
         */
        if (ktext_all)
                cpumask_copy(&ktext_mask, cpu_possible_mask);
        else if (ktext_nondataplane)
                ktext_mask = kstripe_mask;
        else if (!cpumask_empty(&ktext_mask)) {
                /* Sanity-check any mask that was requested */
                struct cpumask bad;
                cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
                cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
                if (!cpumask_empty(&bad))
                        pr_info("ktext: not using unavailable cpus %*pbl\n",
                                cpumask_pr_args(&bad));
                if (cpumask_empty(&ktext_mask)) {
                        pr_warn("ktext: no valid cpus; caching on %d\n",
                                smp_processor_id());
                        cpumask_copy(&ktext_mask,
                                     cpumask_of(smp_processor_id()));
                }
        }

        address = MEM_SV_START;
        pmd = get_pmd(pgtables, address);
        pfn = 0;  /* code starts at PA 0 */
        if (ktext_small) {
                /* Allocate an L2 PTE for the kernel text */
                int cpu = 0;
                pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
                                                 PAGE_HOME_IMMUTABLE);

                if (ktext_local) {
                        if (ktext_nocache)
                                prot = hv_pte_set_mode(prot,
                                                       HV_PTE_MODE_UNCACHED);
                        else
                                prot = hv_pte_set_mode(prot,
                                                       HV_PTE_MODE_CACHE_NO_L3);
                } else {
                        prot = hv_pte_set_mode(prot,
                                               HV_PTE_MODE_CACHE_TILE_L3);
                        cpu = cpumask_first(&ktext_mask);
                        prot = ktext_set_nocache(prot);
                }

                BUG_ON(address != (unsigned long)_text);
                pte = NULL;
                for (; address < (unsigned long)_einittext;
                     pfn++, address += PAGE_SIZE) {
                        pte_ofs = pte_index(address);
                        if (pte_ofs == 0) {
                                if (pte)
                                        assign_pte(pmd++, pte);
                                pte = alloc_pte();
                        }
                        if (!ktext_local) {
                                prot = set_remote_cache_cpu(prot, cpu);
                                cpu = cpumask_next(cpu, &ktext_mask);
                                if (cpu == NR_CPUS)
                                        cpu = cpumask_first(&ktext_mask);
                        }
                        pte[pte_ofs] = pfn_pte(pfn, prot);
                }
                if (pte)
                        assign_pte(pmd, pte);
        } else {
                pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
                pteval = pte_mkhuge(pteval);
                if (ktext_hash) {
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_HASH_L3);
                        pteval = ktext_set_nocache(pteval);
                } else if (cpumask_weight(&ktext_mask) == 1) {
                        pteval = set_remote_cache_cpu(pteval,
                                                      cpumask_first(&ktext_mask));
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_TILE_L3);
                        pteval = ktext_set_nocache(pteval);
                } else if (ktext_nocache)
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_UNCACHED);
                else
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_NO_L3);
                for (; address < (unsigned long)_einittext;
                     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
                        *(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
        }

        /* Set swapper_pgprot here so it is flushed to memory right away. */
        swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

        /*
         * Since we may be changing the caching of the stack and page
         * table itself, we invoke an assembly helper to do the
         * following steps:
         *
         *  - flush the cache so we start with an empty slate
         *  - install pgtables[] as the real page table
         *  - flush the TLB so the new page table takes effect
         */
        irqmask = interrupt_mask_save_mask();
        interrupt_mask_set_mask(-1ULL);
        rc = flush_and_install_context(__pa(pgtables),
                                       init_pgprot((unsigned long)pgtables),
                                       __this_cpu_read(current_asid),
                                       cpumask_bits(my_cpu_mask));
        interrupt_mask_restore_mask(irqmask);
        BUG_ON(rc != 0);

        /* Copy the page table back to the normal swapper_pg_dir. */
        memcpy(pgd_base, pgtables, sizeof(pgtables));
        __install_page_table(pgd_base, __this_cpu_read(current_asid),
                             swapper_pgprot);

        /*
         * We just read swapper_pgprot and thus brought it into the cache,
         * with its new home & caching mode.  When we start the other CPUs,
         * they're going to reference swapper_pgprot via their initial fake
         * VA-is-PA mappings, which cache everything locally.  At that
         * time, if it's in our cache with a conflicting home, the
         * simulator's coherence checker will complain.  So, flush it out
         * of our cache; we're not going to ever use it again anyway.
         */
        __insn_finv(&swapper_pgprot);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid.  The argument is a physical page number.
 *
 * On Tile, the only valid things for which we can just hand out unchecked
 * PTEs are the kernel code and data.  Anything else might change its
 * homing with time, and we wouldn't know to adjust the /dev/mem PTEs.
 * Note that init_thread_union is released to heap soon after boot,
 * so we include it in the init data.
 *
 * For TILE-Gx, we might want to consider allowing access to PA
 * regions corresponding to PCI space, etc.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        return pagenr < kaddr_to_pfn(_end) &&
                !(pagenr >= kaddr_to_pfn(&init_thread_union) ||
                  pagenr < kaddr_to_pfn(__init_end)) &&
                !(pagenr >= kaddr_to_pfn(_sinittext) ||
                  pagenr <= kaddr_to_pfn(_einittext-1));
}
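
/*
 * Added commentary: the three clauses above bound the allowed PFNs by
 * the end of the image (_end) and carve out the init-data window
 * around __init_end/init_thread_union and the init-text window
 * between _sinittext and _einittext, matching the "code and data
 * only" policy described in the comment above.
 */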

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */

#ifndef CONFIG_64BIT
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page = pfn_to_page(start);

        for (pfn = start; pfn < end; ) {
                /* Optimize by freeing pages in large batches */
                int order = __ffs(pfn);
                int count, i;
                struct page *p;

                if (order >= MAX_ORDER)
                        order = MAX_ORDER-1;
                count = 1 << order;
                while (pfn + count > end) {
                        count >>= 1;
                        --order;
                }
                for (p = page, i = 0; i < count; ++i, ++p) {
                        __ClearPageReserved(p);
                        /*
                         * Hacky direct set to avoid unnecessary
                         * lock take/release for EVERY page here.
                         */
                        p->_count.counter = 0;
                        p->_mapcount.counter = -1;
                }
                init_page_count(page);
                __free_pages(page, order);
                adjust_managed_page_count(page, count);

                page += count;
                pfn += count;
        }
}
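
/*
 * Worked example (added commentary): freeing PFNs [0x1234, 0x1400).
 * __ffs(0x1234) == 2, so the first batch is 4 pages at 0x1234; then
 * __ffs(0x1238) == 3 gives 8 pages, __ffs(0x1240) == 6 gives 64, and
 * so on, each batch aligned to its own size so __free_pages() always
 * gets a valid buddy block.  The "while (pfn + count > end)" loop
 * trims the final batch so it never runs past 'end'.
 */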

static void __init set_non_bootmem_pages_init(void)
{
        struct zone *z;
        for_each_zone(z) {
                unsigned long start, end;
                int nid = z->zone_pgdat->node_id;
#ifdef CONFIG_HIGHMEM
                int idx = zone_idx(z);
#endif

                start = z->zone_start_pfn;
                end = start + z->spanned_pages;
                start = max(start, node_free_pfn[nid]);
                start = max(start, max_low_pfn);

#ifdef CONFIG_HIGHMEM
                if (idx == ZONE_HIGHMEM)
                        totalhigh_pages += z->spanned_pages;
#endif
                if (kdata_huge) {
                        unsigned long percpu_pfn = node_percpu_pfn[nid];
                        if (start < percpu_pfn && end > percpu_pfn)
                                end = percpu_pfn;
                }
#ifdef CONFIG_PCI
                if (start <= pci_reserve_start_pfn &&
                    end > pci_reserve_start_pfn) {
                        if (end > pci_reserve_end_pfn)
                                init_free_pfn_range(pci_reserve_end_pfn, end);
                        end = pci_reserve_start_pfn;
                }
#endif
                init_free_pfn_range(start, end);
        }
}
#endif

/*
 * paging_init() sets up the page tables - note that all of lowmem is
 * already mapped by head.S.
 */
void __init paging_init(void)
{
#ifdef __tilegx__
        pud_t *pud;
#endif
        pgd_t *pgd_base = swapper_pg_dir;

        kernel_physical_mapping_init(pgd_base);

        /* Fixed mappings, only the page table structure has to be created. */
        page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
                              FIXADDR_TOP, pgd_base);

#ifdef CONFIG_HIGHMEM
        permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
        /*
         * Since GX allocates just one pmd_t array worth of vmalloc space,
         * we go ahead and allocate it statically here, then share it
         * globally.  As a result we don't have to worry about any task
         * changing init_mm once we get up and running, and there's no
         * need for e.g. vmalloc_sync_all().
         */
        BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
        pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
        assign_pmd(pud, alloc_pmd());
#endif
}

/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_FLATMEM
        max_mapnr = max_low_pfn;
#endif
}

void __init mem_init(void)
{
        int i;
#ifndef __tilegx__
        void *last;
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
                pr_err("fixmap and kmap areas overlap - this will crash\n");
                pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

        /* this will put all bootmem onto the freelists */
        free_all_bootmem();

#ifndef CONFIG_64BIT
        /* count all remaining LOWMEM and give all HIGHMEM to page allocator */
        set_non_bootmem_pages_init();
#endif

        mem_init_print_info(NULL);

        /*
         * In debug mode, dump some interesting memory mappings.
         */
#ifdef CONFIG_HIGHMEM
        printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
               FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
        printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
               PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
        printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
               _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
        for (i = MAX_NUMNODES-1; i >= 0; --i) {
                struct pglist_data *node = &node_data[i];
                if (node->node_present_pages) {
                        unsigned long start = (unsigned long)
                                pfn_to_kaddr(node->node_start_pfn);
                        unsigned long end = start +
                                (node->node_present_pages << PAGE_SHIFT);
                        printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
                               i, start, end - 1);
                }
        }
#else
        last = high_memory;
        for (i = MAX_NUMNODES-1; i >= 0; --i) {
                if ((unsigned long)vbase_map[i] != -1UL) {
                        printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
                               i, (unsigned long) (vbase_map[i]),
                               (unsigned long) (last-1));
                        last = vbase_map[i];
                }
        }
#endif

#ifndef __tilegx__
        /*
         * Convert from using one lock for all atomic operations to
         * one per cpu.
         */
        __init_atomic_per_cpu();
#endif
}

/*
 * This is for the non-NUMA, single node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to the highmem for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(u64 start, u64 size)
{
        struct pglist_data *pgdata = &contig_page_data;
        struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        /* TODO */
        return -EBUSY;
}
#endif
#endif

struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
        pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
        if (!pgd_cache)
                panic("pgtable_cache_init(): Cannot create pgd cache");
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static long __write_once initfree;
#else
static long __write_once initfree = 1;
#endif

/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
{
        long val;
        if (kstrtol(str, 0, &val) == 0) {
                initfree = val;
                pr_info("initfree: %s free init pages\n",
                        initfree ? "will" : "won't");
        }
        return 1;
}
__setup("initfree=", set_initfree);
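
/*
 * Example (added commentary): booting with "initfree=0" keeps the
 * __init pages around but unmaps them, so any late reference to init
 * code or data takes a kernel page fault instead of silently reading
 * freed memory; that is also the default when CONFIG_DEBUG_PAGEALLOC
 * is enabled, per the initializer above.
 */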

static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = (unsigned long) begin;

        if (kdata_huge && !initfree) {
                pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
                initfree = 1;
        }
        end = (end + PAGE_SIZE - 1) & PAGE_MASK;
        local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                /*
                 * Note we just reset the home here directly in the
                 * page table.  We know this is safe because our caller
                 * just flushed the caches on all the other cpus,
                 * and they won't be touching any of these pages.
                 */
                int pfn = kaddr_to_pfn((void *)addr);
                struct page *page = pfn_to_page(pfn);
                pte_t *ptep = virt_to_kpte(addr);
                if (!initfree) {
                        /*
                         * If debugging page accesses then do not free
                         * this memory but mark them not present - any
                         * buggy init-section access will create a
                         * kernel page fault:
                         */
                        pte_clear(&init_mm, addr, ptep);
                        continue;
                }
                if (pte_huge(*ptep))
                        BUG_ON(!kdata_huge);
                else
                        set_pte_at(&init_mm, addr, ptep,
                                   pfn_pte(pfn, PAGE_KERNEL));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
        const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;

        /*
         * Evict the cache on all cores to avoid incoherence.
         * We are guaranteed that no one will touch the init pages any more.
         */
        homecache_evict(&cpu_cacheable_map);

        /* Free the data pages that we won't use again after init. */
        free_init_pages("unused kernel data",
                        (unsigned long)__init_begin,
                        (unsigned long)__init_end);

        /*
         * Free the pages mapped from 0xc0000000 that correspond to code
         * pages from MEM_SV_START that we won't use again after init.
         */
        free_init_pages("unused kernel text",
                        (unsigned long)_sinittext - text_delta,
                        (unsigned long)_einittext - text_delta);

        /* Do a global TLB flush so everyone sees the changes. */
        flush_tlb_all();
}