@@ -400,12 +400,10 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 	unsigned long offset = phys & ~PAGE_MASK;
 	void *vaddr;
 
-	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
-	if (page_is_ram(start >> PAGE_SHIFT))
-		return __va(phys);
+	/* memremap() maps if RAM, otherwise falls back to ioremap() */
+	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
 
-	vaddr = ioremap_cache(start, PAGE_SIZE);
-	/* Only add the offset on success and return NULL if the ioremap() failed: */
+	/* Only add the offset on success and return NULL if memremap() failed */
 	if (vaddr)
 		vaddr += offset;
 
@@ -414,10 +412,7 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 {
-	if (page_is_ram(phys >> PAGE_SHIFT))
-		return;
-
-	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+	memunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 
 /*
@@ -626,6 +621,11 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 	return prot;
 }
 
+bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
+{
+	return arch_memremap_can_ram_remap(phys_addr, size, 0);
+}
+
 #ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
 /* Remap memory with encryption */
 void __init *early_memremap_encrypted(resource_size_t phys_addr,