@@ -67,18 +67,19 @@ static inline void arch_wmb_pmem(void)
 }
 
 /**
- * __arch_wb_cache_pmem - write back a cache range with CLWB
+ * arch_wb_cache_pmem - write back a cache range with CLWB
  * @vaddr: virtual start address
  * @size: number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
  * instruction. This function requires explicit ordering with an
- * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
+ * arch_wmb_pmem() call.
  */
-static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
+static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
 {
 	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
 	unsigned long clflush_mask = x86_clflush_size - 1;
+	void *vaddr = (void __force *)addr;
 	void *vend = vaddr + size;
 	void *p;
 
@@ -115,7 +116,7 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 	len = copy_from_iter_nocache(vaddr, bytes, i);
 
 	if (__iter_needs_pmem_wb(i))
-		__arch_wb_cache_pmem(vaddr, bytes);
+		arch_wb_cache_pmem(addr, bytes);
 
 	return len;
 }
@@ -133,7 +134,7 @@ static inline void arch_clear_pmem(void __pmem *addr, size_t size)
 	void *vaddr = (void __force *)addr;
 
 	memset(vaddr, 0, size);
-	__arch_wb_cache_pmem(vaddr, size);
+	arch_wb_cache_pmem(addr, size);
 }
 
 static inline bool __arch_has_wmb_pmem(void)
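
For context, the kernel-doc in the first hunk states the ordering contract of the renamed
helper: stores land in the pmem range, arch_wb_cache_pmem() writes the dirtied cache lines
back with CLWB, and a later arch_wmb_pmem() orders/flushes so the data is durable. A minimal
caller might look like the sketch below; pmem_store_example() and its arguments are
hypothetical and not part of this patch, only arch_wb_cache_pmem(), arch_wmb_pmem() and the
__pmem/__force annotations come from the code above.

/*
 * Hypothetical illustration only (not part of the patch): copy a buffer
 * into a persistent-memory mapping and make it durable using the API
 * touched by the hunks above.
 */
static void pmem_store_example(void __pmem *dst, const void *src, size_t len)
{
	/* Strip the __pmem address-space annotation for the plain memcpy. */
	memcpy((void __force *)dst, src, len);

	/* Write back the cache lines covering [dst, dst + len) with CLWB. */
	arch_wb_cache_pmem(dst, len);

	/* Explicit ordering point required by the kernel-doc contract. */
	arch_wmb_pmem();
}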