@@ -65,56 +65,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
 		clwb(p);
 }
 
-/**
- * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
- * @addr:	PMEM destination address
- * @bytes:	number of bytes to copy
- * @i:		iterator with source data
- *
- * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- */
-static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
-		struct iov_iter *i)
-{
-	size_t len;
-
-	/* TODO: skip the write-back by always using non-temporal stores */
-	len = copy_from_iter_nocache(addr, bytes, i);
-
-	/*
-	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
-	 * non-temporal stores for the bulk of the transfer, but we need
-	 * to manually flush if the transfer is unaligned. A cached
-	 * memory copy is used when destination or size is not naturally
-	 * aligned. That is:
-	 *   - Require 8-byte alignment when size is 8 bytes or larger.
-	 *   - Require 4-byte alignment when size is 4 bytes.
-	 *
-	 * In the non-iovec case the entire destination needs to be
-	 * flushed.
-	 */
-	if (iter_is_iovec(i)) {
-		unsigned long flushed, dest = (unsigned long) addr;
-
-		if (bytes < 8) {
-			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-				arch_wb_cache_pmem(addr, bytes);
-		} else {
-			if (!IS_ALIGNED(dest, 8)) {
-				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
-				arch_wb_cache_pmem(addr, 1);
-			}
-
-			flushed = dest - (unsigned long) addr;
-			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
-				arch_wb_cache_pmem(addr + bytes - 1, 1);
-		}
-	} else
-		arch_wb_cache_pmem(addr, bytes);
-
-	return len;
-}
-
 /**
  * arch_clear_pmem - zero a PMEM memory range
  * @addr:	virtual start address
|