mmu.c

/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

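/*
 * Allocate zeroed, naturally aligned memory straight from memblock, for
 * page tables needed before the core allocators are up.
 */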
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	BUG_ON(!ptr);
	memset(ptr, 0, sz);
	return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

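/*
 * Fill in the PTE level for [addr, end): allocate a pte table if the pmd
 * is empty or a section mapping (splitting the latter), then write one
 * pte per page with the requested protection.
 */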
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

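/*
 * remap a PUD into PMDs (the PUD-level counterpart of split_pmd above)
 */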
void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * The existing 1G block mapping must remain
			 * present while it is being split.
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

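/*
 * PUD-level blocks cover 1GB only with the 4K granule, and can be used
 * only when the virtual range and the physical address are all aligned
 * to PUD_SIZE.
 */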
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * given virtual/physical address range and protection.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     void *(*alloc)(unsigned long size))
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

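/*
 * Page-table allocator for use once the core allocators are available;
 * a single page is always enough for any level of table.
 */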
static void *late_alloc(unsigned long size)
{
	void *ptr;

	BUG_ON(size > PAGE_SIZE);
	ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);
	return ptr;
}

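/*
 * Map the given physical range into init_mm, allocating any tables from
 * memblock; marked __ref since it calls the __init early_alloc().
 */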
static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
				 phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, prot, early_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_alloc);
}

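/*
 * Like create_mapping(), but for use after boot: any tables needed are
 * allocated with late_alloc() rather than from memblock (see
 * mark_rodata_ro() and fixup_init() below).
 */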
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
				phys, virt, size, prot, late_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SECTION_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SECTION_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_EXEC | PTE_RDONLY);
}
#endif

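/*
 * Remap the init region as non-executable (PAGE_KERNEL) so that freed
 * init memory can no longer be run.
 */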
void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();
	fixup_executable();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}

/*
 * Enable the identity mapping to allow the MMU to be disabled.
 */
void setup_mm_for_reboot(void)
{
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();
	cpu_switch_mm(idmap_pg_dir, &init_mm);
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
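/*
 * With the 4K granule, back the vmemmap with 2MB section mappings rather
 * than individual base pages.
 */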
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

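/*
 * Statically allocated bootstrap page tables for the fixmap; only the
 * levels that the configured CONFIG_PGTABLE_LEVELS actually uses are
 * defined.
 */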
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

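/*
 * Install a single fixmap entry, or clear it again when called with an
 * empty pgprot.
 */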
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}