/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
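
/*
 * Common helper for the ioremap() variants below: reserve a VM_IOREMAP
 * area in the vmalloc space and map the (page-aligned) physical region
 * into it.  @caller is recorded so the mapping is attributed to its
 * owner in /proc/vmallocinfo.
 */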
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
                                      pgprot_t prot, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        int err;
        unsigned long addr;
        struct vm_struct *area;

        /*
         * Page align the mapping address and size, taking account of any
         * offset.
         */
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(size + offset);

        /*
         * Don't allow wraparound, zero size or outside PHYS_MASK.
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
                return NULL;

        /*
         * Don't allow RAM to be mapped.
         */
        if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
                return NULL;

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

        err = ioremap_page_range(addr, addr + size, phys_addr, prot);
        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        return (void __iomem *)(offset + addr);
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
        return __ioremap_caller(phys_addr, size, prot,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);
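
/*
 * Illustrative driver-side use (a sketch, not code from this file;
 * "res", "regs" and CTRL_OFFSET are hypothetical).  Drivers normally go
 * through the ioremap()/iounmap() wrappers in asm/io.h, which are
 * expected to funnel into __ioremap()/__iounmap() with a Device memory
 * type:
 *
 *        void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *        if (!regs)
 *                return -ENOMEM;
 *        writel(1, regs + CTRL_OFFSET);
 *        iounmap(regs);
 */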
void __iounmap(volatile void __iomem *io_addr)
{
        unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

        /*
         * We could get an address outside vmalloc range in case
         * of ioremap_cache() reusing a RAM mapping.
         */
        if (VMALLOC_START <= addr && addr < VMALLOC_END)
                vunmap((void *)addr);
}
EXPORT_SYMBOL(__iounmap);
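
/*
 * For RAM, ioremap_cache() does not build a new mapping at all: the
 * linear mapping already provides a cacheable view, so its virtual
 * address is returned directly.  This is why __iounmap() above checks
 * that an address lies inside the vmalloc range before vunmap()ing it.
 */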
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
        /* For normal memory we already have a cacheable mapping. */
        if (pfn_valid(__phys_to_pfn(phys_addr)))
                return (void __iomem *)__phys_to_virt(phys_addr);

        return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
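
/*
 * Boot-time fixmap support.  bm_pte is a statically allocated PTE table
 * backing the FIX_BTMAP slots; it is only needed with 4K pages, where a
 * separate pmd entry has to be populated (see early_ioremap_init()
 * below).
 */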
#ifndef CONFIG_ARM64_64K_PAGES
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#endif

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = pgd_offset_k(addr);
        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        pud = pud_offset(pgd, addr);
        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset(pud, addr);
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        pmd_t *pmd = early_ioremap_pmd(addr);

        BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

        return pte_offset_kernel(pmd, addr);
}
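
/*
 * Wire up the boot-time fixmap: find the pmd covering the FIX_BTMAP
 * range, populate it with bm_pte on 4K pages, and sanity-check that the
 * whole range fits in a single pmd before handing over to the generic
 * early_ioremap code.
 */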
void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
#ifndef CONFIG_ARM64_64K_PAGES
        /* need to populate pmd for 4k pagesize only */
        pmd_populate_kernel(&init_mm, pmd, bm_pte);
#endif
        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p\n",
                        pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));
                pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
        }

        early_ioremap_setup();
}
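
/*
 * Install, or tear down when passed an empty pgprot, the mapping for a
 * single fixmap slot.  Clearing an entry also flushes the now-stale TLB
 * entry for that page.
 */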
void __init __early_set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}
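
/*
 * These hooks are driven by the generic early ioremap code in
 * mm/early_ioremap.c (hooked up via the early_ioremap_setup() call
 * above), which is expected to use __early_set_fixmap() to map and
 * unmap the FIX_BTMAP slots on behalf of early ioremap users.
 */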