
x86/efi: Access EFI data as encrypted when SEV is active

EFI data is encrypted when the kernel is run under SEV. Update the
page table references to be sure the EFI memory areas are accessed
encrypted.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: linux-efi@vger.kernel.org
Cc: kvm@vger.kernel.org
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@kernel.org>
Link: https://lkml.kernel.org/r/20171020143059.3291-8-brijesh.singh@amd.com
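For illustration, here is a minimal, self-contained C sketch of the pattern this patch repeats in every hunk below: compute the page-protection flags first, then OR in _PAGE_ENC when sev_active() reports an SEV guest. This is only a userspace demonstration of the flag composition; the _PAGE_RW/_PAGE_ENC values and the sev_active() stub are stand-ins, not the kernel's real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in bit values for illustration only, not the real x86 page-table bits. */
#define _PAGE_RW   (1UL << 1)
#define _PAGE_ENC  (1UL << 51)

/* Stand-in for the kernel's sev_active(); pretend we run as an SEV guest. */
static bool sev_active(void)
{
	return true;
}

int main(void)
{
	unsigned long pf = _PAGE_RW;

	/* Same shape as the efi_setup_page_tables() change in the diff. */
	if (sev_active())
		pf |= _PAGE_ENC;

	printf("mapping flags: %#lx\n", pf);
	return 0;
}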
Tom Lendacky, 7 years ago
commit 1379edd596

1 file changed, 15 insertions(+), 1 deletion(-)
arch/x86/platform/efi/efi_64.c | +15 -1

@@ -33,6 +33,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/ucs2_string.h>
+#include <linux/mem_encrypt.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -370,7 +371,11 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 	 * as trim_bios_range() will reserve the first page and isolate it away
 	 * from memory allocators anyway.
 	 */
-	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+	pf = _PAGE_RW;
+	if (sev_active())
+		pf |= _PAGE_ENC;
+
+	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
 		pr_err("Failed to create 1:1 mapping for the first page!\n");
 		pr_err("Failed to create 1:1 mapping for the first page!\n");
 		return 1;
 		return 1;
 	}
 	}
@@ -413,6 +418,9 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
 	if (!(md->attribute & EFI_MEMORY_WB))
 		flags |= _PAGE_PCD;
 
+	if (sev_active())
+		flags |= _PAGE_ENC;
+
 	pfn = md->phys_addr >> PAGE_SHIFT;
 	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
 		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
@@ -539,6 +547,9 @@ static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *m
 	if (!(md->attribute & EFI_MEMORY_RO))
 		pf |= _PAGE_RW;
 
+	if (sev_active())
+		pf |= _PAGE_ENC;
+
 	return efi_update_mappings(md, pf);
 }
 
@@ -590,6 +601,9 @@ void __init efi_runtime_update_mappings(void)
 			(md->type != EFI_RUNTIME_SERVICES_CODE))
 			pf |= _PAGE_RW;
 
+		if (sev_active())
+			pf |= _PAGE_ENC;
+
 		efi_update_mappings(md, pf);
 	}
 }