@@ -7,6 +7,7 @@
  */
 #include <linux/export.h>
 #include <linux/uaccess.h>
+#include <linux/highmem.h>
 
 /*
  * Zero Userspace
@@ -73,3 +74,130 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
 	clac();
 	return len;
 }
+
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * clean_cache_range - write back a cache range with CLWB
+ * @addr:	virtual start address
+ * @size:	number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
+ */
+static void clean_cache_range(void *addr, size_t size)
+{
+	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
+	unsigned long clflush_mask = x86_clflush_size - 1;
+	void *vend = addr + size;
+	void *p;
+
+	for (p = (void *)((unsigned long)addr & ~clflush_mask);
+	     p < vend; p += x86_clflush_size)
+		clwb(p);
+}
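
For orientation only (not part of the patch): a user-space sketch of the same start-address rounding, assuming a 64-byte cache line. It shows why an 8-byte range that straddles a line boundary is written back as two clwb()-sized steps, while an aligned 8-byte range touches a single line.

#include <stdio.h>
#include <stdint.h>

#define CLFLUSH_SIZE 64UL	/* stand-in for boot_cpu_data.x86_clflush_size */

/* Count the cache lines clean_cache_range() would write back. */
static unsigned long lines_touched(uintptr_t addr, size_t size)
{
	uintptr_t mask = CLFLUSH_SIZE - 1;
	uintptr_t end = addr + size;
	unsigned long n = 0;

	for (uintptr_t p = addr & ~mask; p < end; p += CLFLUSH_SIZE)
		n++;			/* one clwb() per iteration */
	return n;
}

int main(void)
{
	printf("%lu\n", lines_touched(0x1000 + 60, 8));	/* 2: straddles a line boundary */
	printf("%lu\n", lines_touched(0x1000, 8));		/* 1: fits in one line */
	return 0;
}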
+
+long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
+{
+	unsigned long flushed, dest = (unsigned long) dst;
+	long rc = __copy_user_nocache(dst, src, size, 0);
+
+	/*
+	 * __copy_user_nocache() uses non-temporal stores for the bulk
+	 * of the transfer, but we need to manually flush if the
+	 * transfer is unaligned. A cached memory copy is used when
+	 * destination or size is not naturally aligned. That is:
+	 *   - Require 8-byte alignment when size is 8 bytes or larger.
+	 *   - Require 4-byte alignment when size is 4 bytes.
+	 */
+	if (size < 8) {
+		if (!IS_ALIGNED(dest, 4) || size != 4)
+			clean_cache_range(dst, 1);
+	} else {
+		if (!IS_ALIGNED(dest, 8)) {
+			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+			clean_cache_range(dst, 1);
+		}
+
+		flushed = dest - (unsigned long) dst;
+		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
+			clean_cache_range(dst + size - 1, 1);
+	}
+
+	return rc;
+}
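
Purely as a reading aid for the alignment comment above (illustrative, mirroring the logic rather than extending the patch): a predicate for when at least one explicit clean_cache_range() call is issued, i.e. when __copy_user_nocache() falls back to cached stores for the head or tail of the copy.

#include <stdbool.h>
#include <stdint.h>

#define ALIGNED_TO(x, a)	(((x) & ((a) - 1)) == 0)

/* Hypothetical helper: would __copy_user_flushcache() have to write
 * back any destination cache lines itself for this (dest, size)? */
static bool needs_manual_flush(uintptr_t dest, size_t size)
{
	if (size < 8)	/* small copies: only a 4-byte-aligned 4-byte store is non-temporal */
		return !ALIGNED_TO(dest, 4) || size != 4;
	/* large copies: an unaligned head and/or a tail not a multiple of 8 */
	return !ALIGNED_TO(dest, 8) || !ALIGNED_TO(size, 8);
}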
+
+void memcpy_flushcache(void *_dst, const void *_src, size_t size)
+{
+	unsigned long dest = (unsigned long) _dst;
+	unsigned long source = (unsigned long) _src;
+
+	/* cache copy and flush to align dest */
+	if (!IS_ALIGNED(dest, 8)) {
+		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
+
+		memcpy((void *) dest, (void *) source, len);
+		clean_cache_range((void *) dest, len);
+		dest += len;
+		source += len;
+		size -= len;
+		if (!size)
+			return;
+	}
+
+	/* 4x8 movnti loop */
+	while (size >= 32) {
+		asm("movq (%0), %%r8\n"
+		    "movq 8(%0), %%r9\n"
+		    "movq 16(%0), %%r10\n"
+		    "movq 24(%0), %%r11\n"
+		    "movnti %%r8, (%1)\n"
+		    "movnti %%r9, 8(%1)\n"
+		    "movnti %%r10, 16(%1)\n"
+		    "movnti %%r11, 24(%1)\n"
+		    :: "r" (source), "r" (dest)
+		    : "memory", "r8", "r9", "r10", "r11");
+		dest += 32;
+		source += 32;
+		size -= 32;
+	}
+
+	/* 1x8 movnti loop */
+	while (size >= 8) {
+		asm("movq (%0), %%r8\n"
+		    "movnti %%r8, (%1)\n"
+		    :: "r" (source), "r" (dest)
+		    : "memory", "r8");
+		dest += 8;
+		source += 8;
+		size -= 8;
+	}
+
+	/* 1x4 movnti loop */
+	while (size >= 4) {
+		asm("movl (%0), %%r8d\n"
+		    "movnti %%r8d, (%1)\n"
+		    :: "r" (source), "r" (dest)
+		    : "memory", "r8");
+		dest += 4;
+		source += 4;
+		size -= 4;
+	}
+
+	/* cache copy for remaining bytes */
+	if (size) {
+		memcpy((void *) dest, (void *) source, size);
+		clean_cache_range((void *) dest, size);
+	}
+}
+EXPORT_SYMBOL_GPL(memcpy_flushcache);
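
A minimal, hypothetical caller sketch (the function and buffer names are illustrative, not from this patch): memcpy_flushcache() leaves every destination line either stored non-temporally or explicitly written back, so a caller that wants durability on persistent memory still issues a store fence afterwards.

/* Illustrative only: append a record to a kernel-addressable pmem
 * mapping (e.g. one obtained through the dax/pmem layer). */
static void pmem_append_record(void *pmem_dst, const void *rec, size_t len)
{
	memcpy_flushcache(pmem_dst, rec, len);
	wmb();	/* sfence on x86: order the non-temporal stores and write-backs */
}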
+
+void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
+		size_t len)
+{
+	char *from = kmap_atomic(page);
+
+	memcpy_flushcache(to, from + offset, len);
+	kunmap_atomic(from);
+}
+#endif