@@ -27,6 +27,11 @@

#include "physaddr.h"

+struct ioremap_mem_flags {
+ bool system_ram;
+ bool desc_other;
+};
+
/*
* Fix up the linear direct mapping of the kernel to avoid cache attribute
* conflicts.
@@ -56,17 +61,59 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
return err;
}

-static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
- void *arg)
+static bool __ioremap_check_ram(struct resource *res)
{
+ unsigned long start_pfn, stop_pfn;
unsigned long i;

- for (i = 0; i < nr_pages; ++i)
- if (pfn_valid(start_pfn + i) &&
- !PageReserved(pfn_to_page(start_pfn + i)))
- return 1;
+ if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
+ return false;

- return 0;
+ start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ stop_pfn = (res->end + 1) >> PAGE_SHIFT;
+ if (stop_pfn > start_pfn) {
+ for (i = 0; i < (stop_pfn - start_pfn); ++i)
+ if (pfn_valid(start_pfn + i) &&
+ !PageReserved(pfn_to_page(start_pfn + i)))
+ return true;
+ }
+
+ return false;
+}
+
+static int __ioremap_check_desc_other(struct resource *res)
+{
+ return (res->desc != IORES_DESC_NONE);
+}
+
+static int __ioremap_res_check(struct resource *res, void *arg)
+{
+ struct ioremap_mem_flags *flags = arg;
+
+ if (!flags->system_ram)
+ flags->system_ram = __ioremap_check_ram(res);
+
+ if (!flags->desc_other)
+ flags->desc_other = __ioremap_check_desc_other(res);
+
+ return flags->system_ram && flags->desc_other;
+}
+
+/*
+ * To avoid multiple resource walks, this function walks resources marked as
+ * IORESOURCE_MEM and IORESOURCE_BUSY and looks for system RAM and/or a
+ * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ */
+static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
+ struct ioremap_mem_flags *flags)
+{
+ u64 start, end;
+
+ start = (u64)addr;
+ end = start + size - 1;
+ memset(flags, 0, sizeof(*flags));
+
+ walk_mem_res(start, end, flags, __ioremap_res_check);
}

/*
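A note on the pfn arithmetic in the new __ioremap_check_ram() above: start_pfn rounds the resource start up to a page boundary while stop_pfn rounds one past the resource end down, so only pages fully contained in the resource are checked and a sub-page resource is skipped entirely. A standalone sketch of the rounding, assuming the x86 defaults (PAGE_SHIFT of 12) and made-up resource bounds:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* made-up resource covering less than one full page */
	unsigned long res_start = 0x1234, res_end = 0x1fff;

	unsigned long start_pfn = (res_start + PAGE_SIZE - 1) >> PAGE_SHIFT; /* 2 */
	unsigned long stop_pfn  = (res_end + 1) >> PAGE_SHIFT;               /* 2 */

	/* stop_pfn > start_pfn is false: no fully covered page to check */
	printf("start_pfn=%lu stop_pfn=%lu\n", start_pfn, stop_pfn);
	return 0;
}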
@@ -87,9 +134,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
unsigned long size, enum page_cache_mode pcm, void *caller)
{
unsigned long offset, vaddr;
- resource_size_t pfn, last_pfn, last_addr;
+ resource_size_t last_addr;
const resource_size_t unaligned_phys_addr = phys_addr;
const unsigned long unaligned_size = size;
+ struct ioremap_mem_flags mem_flags;
struct vm_struct *area;
enum page_cache_mode new_pcm;
pgprot_t prot;
@@ -108,13 +156,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
return NULL;
}

+ __ioremap_check_mem(phys_addr, size, &mem_flags);
+
/*
* Don't allow anybody to remap normal RAM that we're using.
*/
- pfn = phys_addr >> PAGE_SHIFT;
- last_pfn = last_addr >> PAGE_SHIFT;
- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
- __ioremap_check_ram) == 1) {
+ if (mem_flags.system_ram) {
WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
&phys_addr, &last_addr);
return NULL;
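The single walk works because the resource walkers stop as soon as the callback returns nonzero; __ioremap_res_check() above returns flags->system_ram && flags->desc_other precisely so the walk can end early once both questions are answered, letting one pass replace the old per-call walk_system_ram_range() here. A self-contained sketch of that accumulate-then-stop pattern (names and types are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct two_flags { bool a, b; };

/* record what this item contributes; nonzero tells the caller to stop */
static int check_item(bool has_a, bool has_b, struct two_flags *f)
{
	if (!f->a)
		f->a = has_a;
	if (!f->b)
		f->b = has_b;
	return f->a && f->b;
}

int main(void)
{
	struct two_flags f = { false, false };
	bool items[][2] = { { true, false }, { false, true }, { true, true } };

	for (unsigned int i = 0; i < 3; i++)
		if (check_item(items[i][0], items[i][1], &f))
			break;	/* stops after the second item */
	printf("a=%d b=%d\n", f.a, f.b);
	return 0;
}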
@@ -146,7 +193,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
pcm = new_pcm;
}

+ /*
+ * If the page being mapped is in memory and SEV is active then
+ * make sure the memory encryption attribute is enabled in the
+ * resulting mapping.
+ */
prot = PAGE_KERNEL_IO;
+ if (sev_active() && mem_flags.desc_other)
+ prot = pgprot_encrypted(prot);
+
switch (pcm) {
case _PAGE_CACHE_MODE_UC:
default:
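For context on the sev_active() branch above: with SEV active, real MMIO must stay unencrypted, but a range that is actually memory, which this patch detects by its resource descriptor being something other than IORES_DESC_NONE (e.g. ACPI tables), has to be mapped encrypted or the guest would read ciphertext. As a rough sketch of what pgprot_encrypted() amounts to on x86, it ORs the memory-encryption page-table bit into the protection value; the mask below is illustrative, since the kernel derives the real one (sme_me_mask) from CPUID at boot:

typedef struct { unsigned long pgprot; } pgprot_t;

/* illustrative stand-in for the boot-derived sme_me_mask (the C-bit) */
static const unsigned long sme_me_mask = 1UL << 47;

/* return a copy of prot with the encryption bit set */
static inline pgprot_t pgprot_encrypted(pgprot_t prot)
{
	return (pgprot_t){ prot.pgprot | sme_me_mask };
}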