@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
-
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end) flush_cache_all()
 #define flush_cache_vunmap(start, end) flush_cache_all()