/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
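
/*
 * Select the page protection for mappings of arbitrary physical memory
 * (e.g. /dev/mem): pfns that are not valid RAM are mapped non-cached,
 * O_SYNC mappings of RAM are write-combined, and everything else keeps
 * the caller's protection.
 */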
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
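
/*
 * Early page-table allocator: returns zeroed, naturally aligned memory taken
 * directly from memblock, for use before the normal allocators are up.
 */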
static void __init *early_alloc(unsigned long sz)
{
        void *ptr = __va(memblock_alloc(sz, sz));
        BUG_ON(!ptr);
        memset(ptr, 0, sz);
        return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = pmd_pfn(*pmd);
        int i = 0;

        do {
                /*
                 * Need to have the least restrictive permissions available;
                 * permissions will be fixed up later. Default the new page
                 * range as contiguous ptes.
                 */
                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
}

/*
 * Given a PTE with the CONT bit set, determine where the CONT range
 * starts, and clear the entire range of PTE CONT bits.
 */
static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
{
        int i;

        pte -= CONT_RANGE_OFFSET(addr);
        for (i = 0; i < CONT_PTES; i++) {
                set_pte(pte, pte_mknoncont(*pte));
                pte++;
        }
        flush_tlb_all();
}

/*
 * Given a range of PTEs set the pfn and provided page protection flags
 */
static void __populate_init_pte(pte_t *pte, unsigned long addr,
                                unsigned long end, phys_addr_t phys,
                                pgprot_t prot)
{
        unsigned long pfn = __phys_to_pfn(phys);

        do {
                /* clear all the bits except the pfn, then apply the prot */
                set_pte(pte, pfn_pte(pfn, prot));
                pte++;
                pfn++;
                addr += PAGE_SIZE;
        } while (addr != end);
}
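
/*
 * Map [addr, end) at the PTE level, splitting an existing PMD block mapping
 * if necessary. Chunks that are CONT_SIZE-aligned both virtually and
 * physically are marked with PTE_CONT so the CPU may use contiguous-hint TLB
 * entries; for anything else the contiguous hint is cleared on the
 * surrounding range before plain PTEs are written.
 */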
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                           unsigned long end, phys_addr_t phys,
                           pgprot_t prot,
                           void *(*alloc)(unsigned long size))
{
        pte_t *pte;
        unsigned long next;

        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
                __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
                flush_tlb_all();
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        do {
                next = min(end, (addr + CONT_SIZE) & CONT_MASK);
                if (((addr | next | phys) & ~CONT_MASK) == 0) {
                        /* a block of CONT_PTES */
                        __populate_init_pte(pte, addr, next, phys,
                                            prot | __pgprot(PTE_CONT));
                } else {
                        /*
                         * If the range being split is already inside of a
                         * contiguous range but this PTE isn't going to be
                         * contiguous, then we want to unmark the adjacent
                         * ranges, then update the portion of the range we
                         * are interested in.
                         */
                        clear_cont_pte_range(pte, addr);
                        __populate_init_pte(pte, addr, next, phys, prot);
                }

                pte += (next - addr) >> PAGE_SHIFT;
                phys += next - addr;
                addr = next;
        } while (addr != end);
}
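
/*
 * Replace a 1GB PUD block mapping with a table of PMD block entries covering
 * the same physical range with identical attributes.
 */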
void split_pud(pud_t *old_pud, pmd_t *pmd)
{
        unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
        pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
        int i = 0;

        do {
                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                addr += PMD_SIZE;
        } while (pmd++, i++, i < PTRS_PER_PMD);
}

static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                           unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           void *(*alloc)(unsigned long size))
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
                pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
                if (pud_sect(*pud)) {
                        /*
                         * need to have the 1G of mappings continue to be
                         * present
                         */
                        split_pud(pud, pmd);
                }
                pud_populate(mm, pud, pmd);
                flush_tlb_all();
        }
        BUG_ON(pud_bad(*pud));

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, phys, prot, alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
}
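
/*
 * Decide whether a 1GB PUD block mapping can be used: only with the 4K
 * granule (PAGE_SHIFT == 12), and only when the virtual range and the
 * physical address are all PUD aligned.
 */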
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                                unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                           unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           void *(*alloc)(unsigned long size))
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
                pgd_populate(mm, pgd, pud);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping described by the given physical/virtual range and protection.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
                             phys_addr_t phys, unsigned long virt,
                             phys_addr_t size, pgprot_t prot,
                             void *(*alloc)(unsigned long size))
{
        unsigned long addr, length, end, next;

        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}
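
/*
 * Page-table allocator for the late mapping functions: takes pages from the
 * page allocator rather than memblock, and only supports allocations up to
 * one page in size.
 */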
static void *late_alloc(unsigned long size)
{
        void *ptr;

        BUG_ON(size > PAGE_SIZE);
        ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);
        return ptr;
}
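
/*
 * create_mapping() populates the init_mm page tables using the early
 * (memblock) allocator and must only be used before the page allocator is
 * available; create_pgd_mapping() and create_mapping_late() use late_alloc()
 * and are therefore safe to call afterwards.
 */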
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
                         size, prot, early_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
{
        __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
                         late_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
                         phys, virt, size, prot, late_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        /*
         * Set up the executable regions using the existing section mappings
         * for now. This will get more fine grained later once all memory
         * is mapped
         */
        unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
        unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

        if (end < kernel_x_start) {
                create_mapping(start, __phys_to_virt(start),
                               end - start, PAGE_KERNEL);
        } else if (start >= kernel_x_end) {
                create_mapping(start, __phys_to_virt(start),
                               end - start, PAGE_KERNEL);
        } else {
                if (start < kernel_x_start)
                        create_mapping(start, __phys_to_virt(start),
                                       kernel_x_start - start,
                                       PAGE_KERNEL);
                create_mapping(kernel_x_start,
                               __phys_to_virt(kernel_x_start),
                               kernel_x_end - kernel_x_start,
                               PAGE_KERNEL_EXEC);
                if (kernel_x_end < end)
                        create_mapping(kernel_x_end,
                                       __phys_to_virt(kernel_x_end),
                                       end - kernel_x_end,
                                       PAGE_KERNEL);
        }
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
        create_mapping(start, __phys_to_virt(start), end - start,
                       PAGE_KERNEL_EXEC);
}
#endif
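
/*
 * Map every memblock memory region into the kernel linear mapping via
 * __map_memblock(), temporarily capping the memblock allocation limit so
 * that any page tables allocated along the way come from memory that is
 * already mapped.
 */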
static void __init map_mem(void)
{
        struct memblock_region *reg;
        phys_addr_t limit;

        /*
         * Temporarily limit the memblock range. We need to do this as
         * create_mapping requires puds, pmds and ptes to be allocated from
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
         * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
         * of memory starting from PHYS_OFFSET (which must be aligned to 2MB
         * as per Documentation/arm64/booting.txt).
         */
        limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
        memblock_set_current_limit(limit);

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;

                if (ARM64_SWAPPER_USES_SECTION_MAPS) {
                        /*
                         * For the first memory bank align the start address
                         * and the current memblock limit to prevent
                         * create_mapping() from allocating pte page tables
                         * from unmapped memory. With the section maps, if the
                         * first block doesn't end on a section size boundary,
                         * create_mapping() will try to allocate a pte page,
                         * which may be returned from an unmapped area.
                         * When section maps are not used, the pte page table
                         * for the current limit is already present in
                         * swapper_pg_dir.
                         */
                        if (start < limit)
                                start = ALIGN(start, SECTION_SIZE);
                        if (end < limit) {
                                limit = end & SECTION_MASK;
                                memblock_set_current_limit(limit);
                        }
                }
                __map_memblock(start, end);
        }

        /* Limit no longer required. */
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
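
/*
 * With CONFIG_DEBUG_RODATA, __map_memblock() mapped the section-aligned
 * padding around [_stext, __init_end) as executable. Now that everything is
 * mapped, remap that padding as non-executable PAGE_KERNEL.
 */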
void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
        /* now that we are actually fully mapped, make the start/end more fine grained */
        if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
                unsigned long aligned_start = round_down(__pa(_stext),
                                                         SECTION_SIZE);

                create_mapping(aligned_start, __phys_to_virt(aligned_start),
                               __pa(_stext) - aligned_start,
                               PAGE_KERNEL);
        }

        if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
                unsigned long aligned_end = round_up(__pa(__init_end),
                                                     SECTION_SIZE);
                create_mapping(__pa(__init_end), (unsigned long)__init_end,
                               aligned_end - __pa(__init_end),
                               PAGE_KERNEL);
        }
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        create_mapping_late(__pa(_stext), (unsigned long)_stext,
                            (unsigned long)_etext - (unsigned long)_stext,
                            PAGE_KERNEL_EXEC | PTE_RDONLY);
}
#endif
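
/*
 * Remap the [__init_begin, __init_end) range with non-executable PAGE_KERNEL
 * permissions so the init text can no longer be executed once that memory
 * has been freed.
 */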
void fixup_init(void)
{
        create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
                            (unsigned long)__init_end - (unsigned long)__init_begin,
                            PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        void *zero_page;

        map_mem();
        fixup_executable();

        /* allocate the zero page. */
        zero_page = early_alloc(PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */
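
/*
 * Statically allocated page tables backing the fixmap region, so that
 * early_fixmap_init() can wire them up before any memory allocator is
 * available.
 */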
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        pmd_t *pmd = fixmap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}
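
/*
 * Wire bm_pud/bm_pmd/bm_pte into the init_mm page tables so that
 * __set_fixmap() can be used before paging_init(), and sanity-check that the
 * boot-time ioremap slots (FIX_BTMAP_*) all fall within the single pmd
 * mapped here.
 */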
void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        pgd_populate(&init_mm, pgd, bm_pud);
        pud = pud_offset(pgd, addr);
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}
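
/*
 * Install the pte for a single fixmap slot, or, when an empty pgprot is
 * passed, clear it and invalidate the TLB for that page.
 */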
void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
        }
}
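
/*
 * Map the device tree blob through the FIX_FDT fixmap slot: map one
 * SWAPPER_BLOCK_SIZE chunk read-only to validate the header and read the
 * total size, extend the mapping if the blob crosses into the next block,
 * then reserve the FDT in memblock. Returns the virtual address of the FDT,
 * or NULL if the blob is missing, misaligned, invalid or larger than
 * MAX_FDT_SIZE.
 */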
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
        int size, offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
         * at least 8 bytes so that we can always access the size field of the
         * FDT header after mapping the first chunk, double check here if that
         * is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                       SWAPPER_BLOCK_SIZE, prot);

        if (fdt_check_header(dt_virt) != 0)
                return NULL;

        size = fdt_totalsize(dt_virt);
        if (size > MAX_FDT_SIZE)
                return NULL;

        if (offset + size > SWAPPER_BLOCK_SIZE)
                create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

        memblock_reserve(dt_phys, size);

        return dt_virt;
}