@@ -11,25 +11,31 @@
  *
  */
 
+#include <linux/atomic.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/memblock.h>
+#include <linux/mm_types.h>
 #include <linux/bootmem.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/efi.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
 
 struct efi_memory_map memmap;
 
-static efi_runtime_services_t *runtime;
-
 static u64 efi_system_table;
 
 static int uefi_debug __initdata;
@@ -69,9 +75,33 @@ static void __init efi_setup_idmap(void)
 	}
 }
 
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
+{
+	efi_memory_desc_t *md;
+
+	for_each_efi_memory_desc(&memmap, md) {
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0)
+			/* no virtual mapping has been installed by the stub */
+			break;
+		if (md->virt_addr <= addr &&
+		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+			return md->phys_addr + addr - md->virt_addr;
+	}
+	return addr;
+}
+
 static int __init uefi_init(void)
 {
 	efi_char16_t *c16;
+	void *config_tables;
+	u64 table_size;
 	char vendor[100] = "unknown";
 	int i, retval;
 
@@ -99,7 +129,7 @@ static int __init uefi_init(void)
 			efi.systab->hdr.revision & 0xffff);
 
 	/* Show what we know for posterity */
-	c16 = early_memremap(efi.systab->fw_vendor,
+	c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
 			     sizeof(vendor));
 	if (c16) {
 		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
@@ -112,8 +142,14 @@ static int __init uefi_init(void)
 		efi.systab->hdr.revision >> 16,
 		efi.systab->hdr.revision & 0xffff, vendor);
 
-	retval = efi_config_init(NULL);
+	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+				       table_size);
+
+	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+					 sizeof(efi_config_table_64_t), NULL);
+
+	early_memunmap(config_tables, table_size);
 out:
 	early_memunmap(efi.systab, sizeof(efi_system_table_t));
 	return retval;
@@ -329,51 +365,14 @@ void __init efi_idmap_init(void)
 	early_memunmap(memmap.map, memmap.map_end - memmap.map);
 }
 
-static int __init remap_region(efi_memory_desc_t *md, void **new)
-{
-	u64 paddr, vaddr, npages, size;
-
-	paddr = md->phys_addr;
-	npages = md->num_pages;
-	memrange_efi_to_native(&paddr, &npages);
-	size = npages << PAGE_SHIFT;
-
-	if (is_normal_ram(md))
-		vaddr = (__force u64)ioremap_cache(paddr, size);
-	else
-		vaddr = (__force u64)ioremap(paddr, size);
-
-	if (!vaddr) {
-		pr_err("Unable to remap 0x%llx pages @ %p\n",
-		       npages, (void *)paddr);
-		return 0;
-	}
-
-	/* adjust for any rounding when EFI and system pagesize differs */
-	md->virt_addr = vaddr + (md->phys_addr - paddr);
-
-	if (uefi_debug)
-		pr_info("  EFI remap 0x%012llx => %p\n",
-			md->phys_addr, (void *)md->virt_addr);
-
-	memcpy(*new, md, memmap.desc_size);
-	*new += memmap.desc_size;
-
-	return 1;
-}
-
 /*
- * Switch UEFI from an identity map to a kernel virtual map
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
  */
-static int __init arm64_enter_virtual_mode(void)
+static int __init arm64_enable_runtime_services(void)
 {
-	efi_memory_desc_t *md;
-	phys_addr_t virtmap_phys;
-	void *virtmap, *virt_md;
-	efi_status_t status;
 	u64 mapsize;
-	int count = 0;
-	unsigned long flags;
 
 	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
@@ -395,81 +394,30 @@ static int __init arm64_enter_virtual_mode(void)
 
 	efi.memmap = &memmap;
 
-	/* Map the runtime regions */
-	virtmap = kmalloc(mapsize, GFP_KERNEL);
-	if (!virtmap) {
-		pr_err("Failed to allocate EFI virtual memmap\n");
-		return -1;
-	}
-	virtmap_phys = virt_to_phys(virtmap);
-	virt_md = virtmap;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-			continue;
-		if (!remap_region(md, &virt_md))
-			goto err_unmap;
-		++count;
-	}
-
-	efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+	efi.systab = (__force void *)ioremap_cache(efi_system_table,
+						   sizeof(efi_system_table_t));
 	if (!efi.systab) {
-		/*
-		 * If we have no virtual mapping for the System Table at this
-		 * point, the memory map doesn't cover the physical offset where
-		 * it resides. This means the System Table will be inaccessible
-		 * to Runtime Services themselves once the virtual mapping is
-		 * installed.
-		 */
-		pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
-		goto err_unmap;
+		pr_err("Failed to remap EFI System Table\n");
+		return -1;
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
-	local_irq_save(flags);
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
-
-	/* Call SetVirtualAddressMap with the physical address of the map */
-	runtime = efi.systab->runtime;
-	efi.set_virtual_address_map = runtime->set_virtual_address_map;
-
-	status = efi.set_virtual_address_map(count * memmap.desc_size,
-					     memmap.desc_size,
-					     memmap.desc_version,
-					     (efi_memory_desc_t *)virtmap_phys);
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	local_irq_restore(flags);
-
-	kfree(virtmap);
-
 	free_boot_services();
 
-	if (status != EFI_SUCCESS) {
-		pr_err("Failed to set EFI virtual address map! [%lx]\n",
-			status);
+	if (!efi_enabled(EFI_VIRTMAP)) {
+		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
 		return -1;
 	}
 
 	/* Set up runtime services function pointers */
-	runtime = efi.systab->runtime;
 	efi_native_runtime_setup();
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
 	efi.runtime_version = efi.systab->hdr.revision;
 
 	return 0;
-
-err_unmap:
-	/* unmap all mappings that succeeded: there are 'count' of those */
-	for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
-		md = virt_md;
-		iounmap((__force void __iomem *)md->virt_addr);
-	}
-	kfree(virtmap);
-	return -1;
 }
-early_initcall(arm64_enter_virtual_mode);
+early_initcall(arm64_enable_runtime_services);
 
 static int __init arm64_dmi_init(void)
 {
@@ -484,3 +432,79 @@ static int __init arm64_dmi_init(void)
 	return 0;
 }
 core_initcall(arm64_dmi_init);
+
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+	.mm_rb			= RB_ROOT,
+	.pgd			= efi_pgd,
+	.mm_users		= ATOMIC_INIT(2),
+	.mm_count		= ATOMIC_INIT(1),
+	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
+	INIT_MM_CONTEXT(efi_mm)
+};
+
+static void efi_set_pgd(struct mm_struct *mm)
+{
+	cpu_switch_mm(mm->pgd, mm);
+	flush_tlb_all();
+	if (icache_is_aivivt())
+		__flush_icache_all();
+}
+
+void efi_virtmap_load(void)
+{
+	preempt_disable();
+	efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+	efi_set_pgd(current->active_mm);
+	preempt_enable();
+}
+
+void __init efi_virtmap_init(void)
+{
+	efi_memory_desc_t *md;
+
+	if (!efi_enabled(EFI_BOOT))
+		return;
+
+	for_each_efi_memory_desc(&memmap, md) {
+		u64 paddr, npages, size;
+		pgprot_t prot;
+
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (WARN(md->virt_addr == 0,
+			 "UEFI virtual mapping incomplete or missing -- no entry found for 0x%llx\n",
+			 md->phys_addr))
+			return;
+
+		paddr = md->phys_addr;
+		npages = md->num_pages;
+		memrange_efi_to_native(&paddr, &npages);
+		size = npages << PAGE_SHIFT;
+
+		pr_info("  EFI remap 0x%016llx => %p\n",
+			md->phys_addr, (void *)md->virt_addr);
+
+		/*
+		 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+		 * executable, everything else can be mapped with the XN bits
+		 * set.
+		 */
+		if (!is_normal_ram(md))
+			prot = __pgprot(PROT_DEVICE_nGnRE);
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			prot = PAGE_KERNEL_EXEC;
+		else
+			prot = PAGE_KERNEL;
+
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+	}
+	set_bit(EFI_VIRTMAP, &efi.flags);
+}
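
For reference, a caller of the UEFI runtime services is expected to bracket the firmware call with the efi_virtmap_load()/efi_virtmap_unload() pair added above, so that the call executes on efi_mm's page tables with preemption disabled. A minimal sketch of such a caller follows; the wrapper name and the choice of efi.get_time are illustrative assumptions and are not part of this patch:

/* Illustrative sketch only -- not taken from this patch. */
static efi_status_t example_get_time(efi_time_t *tm)
{
	efi_status_t status;

	efi_virtmap_load();			/* preempt_disable() + switch to efi_mm's pgd */
	status = efi.get_time(tm, NULL);	/* runtime service runs via the EFI_MEMORY_RUNTIME mappings */
	efi_virtmap_unload();			/* restore current->active_mm, re-enable preemption */

	return status;
}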