mem.c

/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;
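
/*
 * Called once at boot: make the region between the current brk and the
 * end of the kernel's reserved area usable, release all remaining boot
 * memory to the page allocator, record the pfn limits, and only then
 * allow kmalloc() by setting kmalloc_ok.
 */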

void __init mem_init(void)
{
        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* this will put all low memory onto the freelists */
        free_all_bootmem();
        max_low_pfn = totalram_pages;
        max_pfn = totalram_pages;
        mem_init_print_info(NULL);
        kmalloc_ok = 1;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
                                                          PAGE_SIZE);
                set_pmd(pmd, __pmd(_KERNPG_TABLE +
                                   (unsigned long) __pa(pte)));
                if (pte != pte_offset_kernel(pmd, 0))
                        BUG();
        }
}
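
/*
 * With CONFIG_3_LEVEL_PGTABLES, allocate a pmd page and hang it off the
 * given pud entry; with two-level page tables the pmd is folded away and
 * there is nothing to allocate.
 */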

static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
        pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

        set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
        if (pmd_table != pmd_offset(pud, 0))
                BUG();
#endif
}
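
/*
 * Walk the range [start, end) in PMD-sized steps and make sure every
 * covered pmd entry under pgd_base has a page table behind it.  Only the
 * table structure is created here; the ptes themselves are filled in by
 * the callers.
 */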

static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = pud_offset(pgd, vaddr);
                if (pud_none(*pud))
                        one_md_table_init(pud);
                pmd = pmd_offset(pud, vaddr);
                for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
                        one_page_table_init(pmd);
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}
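
/*
 * When the host's vsyscall area is reused, copy its contents into freshly
 * allocated pages and map that copy read-only at the user fixmap
 * addresses.
 */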

static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
        long size = FIXADDR_USER_END - FIXADDR_USER_START;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_t p;
        unsigned long v, vaddr = FIXADDR_USER_START;

        if (!size)
                return;

        fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
        v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
        memcpy((void *) v, (void *) FIXADDR_USER_START, size);
        p = __pa(v);
        for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
                      p += PAGE_SIZE) {
                pgd = swapper_pg_dir + pgd_index(vaddr);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                pte = pte_offset_kernel(pmd, vaddr);
                pte_set_val(*pte, p, PAGE_READONLY);
        }
#endif
}
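
/*
 * Early paging setup: allocate the zero page, size the single ZONE_NORMAL
 * zone from the physical memory layout, and pre-build the page-table
 * structure for the fixmap range; the fixmap entries themselves are
 * installed later via set_fixmap().
 */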

void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], vaddr;
        int i;

        empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
                                                                PAGE_SIZE);
        for (i = 0; i < ARRAY_SIZE(zones_size); i++)
                zones_size[i] = 0;

        zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
                (uml_physmem >> PAGE_SHIFT);
        free_area_init(zones_size);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

        fixaddr_user_init();
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/* Allocate and free page tables. */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}
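
/*
 * pte_alloc_one_kernel() returns a zeroed page for kernel page tables;
 * pte_alloc_one() does the same for user page tables and additionally
 * runs the pgtable_page_ctor() bookkeeping required for pte pages.
 */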

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }
        return pte;
}
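
/*
 * A separate pmd level only exists with three-level page tables; there it
 * is allocated as a single zeroed page.
 */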

#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

        if (pmd)
                memset(pmd, 0, PAGE_SIZE);

        return pmd;
}
#endif
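
/*
 * Thin wrapper around kmalloc(), presumably so that the userspace
 * (os-Linux) side of UML, which cannot include kernel headers such as
 * slab.h, can still allocate kernel memory.
 */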

void *uml_kmalloc(int size, int flags)
{
        return kmalloc(size, flags);
}