/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

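/*
 * Map @page at a fixmap address of the right colour for @addr by hand,
 * using a wired TLB entry: the entry is written at the current wired
 * index and the wired count is bumped so it stays put until the matching
 * kunmap_coherent(). A second bank of fixmap slots is used in interrupt
 * context so a nested mapping cannot clobber a task-level one. The
 * pagefault_disable() here pairs with the pagefault_enable() in
 * kunmap_coherent().
 */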
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, prot);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_to_entrylo(pte_val(pte));
#endif

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);

        return (void *) vaddr;
}

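/*
 * kmap_coherent() installs the mapping with the regular kernel page
 * protection, kmap_noncoherent() with PAGE_KERNEL_NC; both are torn
 * down by kunmap_coherent().
 */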
void *kmap_coherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

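/*
 * Tear down the mapping installed by __kmap_pgprot(): drop the wired
 * count by one and refill the freed slot with a unique EntryHi and zero
 * EntryLos so the stale translation can never match again.
 */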
void kunmap_coherent(void)
{
        unsigned int wired;
        unsigned long flags, old_ctx;

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
        pagefault_enable();
}

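/*
 * Copy a page for a user fault. When the dcache has aliases and the
 * source page is mapped in user space with clean dcache lines, read it
 * through a kernel mapping of the same colour so no stale alias is
 * seen; the destination is flushed afterwards unless the icache fills
 * from the dcache and the colours cannot alias.
 */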
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapped(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}

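/*
 * Write into a page that is also mapped in user space. Clean user
 * mappings are written through a same-colour kernel mapping; otherwise
 * the page is written directly and marked dcache-dirty so it gets
 * flushed later. Executable mappings are flushed here unless the
 * icache fills from the dcache.
 */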
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

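/*
 * Read from a page that is also mapped in user space, using the same
 * same-colour mapping trick as copy_to_user_page() when the user
 * mapping is clean.
 */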
void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

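/*
 * Pre-populate the page tables covering [start, end) with bootmem
 * pages so later fixmap/kmap lookups never need to allocate; only the
 * CONFIG_HIGHMEM configuration needs this here, hence the #ifdef.
 */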
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
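/*
 * Walk the firmware-provided boot_mem_map and report whether @pagenr
 * falls in a usable (RAM or init RAM) region.
 */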
int page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        /* not usable memory */
                        continue;
                }

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}

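/*
 * Build the kernel page tables and hand the per-zone PFN limits to the
 * core VM. Highmem is discarded with a warning on CPUs with dcache
 * aliasing, which cannot support it.
 */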
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn __maybe_unused;

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        lastpfn = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                lastpfn = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

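/*
 * Hand every highmem RAM page to the buddy allocator; non-RAM holes in
 * the highmem range stay reserved.
 */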
static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long tmp;

        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp))
                        SetPageReserved(page);
                else
                        free_highmem_page(page);
        }
#endif
}

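/*
 * Late memory initialization: release all bootmem to the buddy
 * allocator, set up the coloured zero pages, free highmem and, when
 * the 64-bit kernel text sits above the base of CKSEG0, expose that
 * segment through /proc/kcore.
 */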
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        free_all_bootmem();
        setup_zero_pages();     /* Setup zeroed pages. */
        mem_init_free_highmem();
        mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /*
                 * The -4 is a hack so that user tools don't have to
                 * handle the overflow.
                 */
                kclist_add(&kcore_kseg0, (void *) CKSEG0,
                           0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

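/*
 * Poison and return to the page allocator every page wholly contained
 * in the physical range [begin, end).
 */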
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif

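/* Platform-installed hook for freeing the init section; see free_initmem(). */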
void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __init_refok free_initmem(void)
{
        prom_free_prom_memory();
        /*
         * Let the platform define a specific function to free the
         * init section since EVA may have used any possible mapping
         * between virtual and physical addresses.
         */
        if (free_init_pages_eva)
                free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
        else
                free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants. So we use the variants from asm-offsets.h until that gcc
 * is officially retired.
 *
 * Align swapper_pg_dir to 64K so that its address can be loaded with a
 * single LUI instruction in the TLB handlers. If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space. So
 * we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;