@@ -27,6 +27,13 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
 }
 #endif
 
+#ifndef arch_memremap_wb
+static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
+{
+	return (__force void *)ioremap_cache(offset, size);
+}
+#endif
+
 static void *try_ram_remap(resource_size_t offset, size_t size)
 {
 	unsigned long pfn = PHYS_PFN(offset);
@@ -34,7 +41,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size)
 	/* In the simple case just return the existing linear address */
 	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
 		return __va(offset);
-	return NULL; /* fallback to ioremap_cache */
+	return NULL; /* fallback to arch_memremap_wb */
 }
 
 /**
@@ -90,7 +97,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
 	if (is_ram == REGION_INTERSECTS)
 		addr = try_ram_remap(offset, size);
 	if (!addr)
-		addr = ioremap_cache(offset, size);
+		addr = arch_memremap_wb(offset, size);
 }
 
 /*