
c6x: switch to RAW_COPY_USER

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro 8 years ago
parent commit 86944ee158
2 changed files with 7 additions and 6 deletions
  1. arch/c6x/Kconfig (+ 1 - 0)
  2. arch/c6x/include/asm/uaccess.h (+ 6 - 6)

+ 1 - 0
arch/c6x/Kconfig

@@ -18,6 +18,7 @@ config C6X
 	select GENERIC_CLOCKEVENTS
 	select MODULES_USE_ELF_RELA
 	select ARCH_NO_COHERENT_DMA_MMAP
+	select ARCH_HAS_RAW_COPY_USER
 
 config MMU
 	def_bool n
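
Selecting ARCH_HAS_RAW_COPY_USER tells the generic uaccess layer that this architecture provides raw_copy_from_user()/raw_copy_to_user(), both returning the number of bytes left uncopied. The sketch below is illustrative only (it is not the actual include/linux/uaccess.h code); it approximates what the generic copy_from_user() wrapper adds on top of the arch hook, namely the access_ok() check and zero-filling of any uncopied tail.

/* Illustrative sketch, not the real kernel wrapper: shows the contract
 * raw_copy_from_user() must satisfy once ARCH_HAS_RAW_COPY_USER is selected.
 */
static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (access_ok(VERIFY_READ, from, n))
		res = raw_copy_from_user(to, from, n);	/* arch-provided hook */
	if (res)
		memset(to + (n - res), 0, res);		/* zero the uncopied tail */
	return res;					/* bytes NOT copied; 0 on success */
}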

+ 6 - 6
arch/c6x/include/asm/uaccess.h

@@ -14,12 +14,10 @@
 #include <linux/string.h>
 
 /*
- * __copy_from_user/copy_to_user are based on ones in asm-generic/uaccess.h
- *
  * C6X supports unaligned 32 and 64 bit loads and stores.
  */
-static inline __must_check long __copy_from_user(void *to,
-		const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	u32 tmp32;
 	u64 tmp64;
@@ -54,8 +52,8 @@ static inline __must_check long __copy_from_user(void *to,
 	return 0;
 }
 
-static inline __must_check long __copy_to_user(void __user *to,
-		const void *from, unsigned long n)
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	u32 tmp32;
 	u64 tmp64;
@@ -89,6 +87,8 @@ static inline __must_check long __copy_to_user(void __user *to,
 	memcpy((void __force *)to, from, n);
 	return 0;
 }
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 extern int _access_ok(unsigned long addr, unsigned long size);
 #ifdef CONFIG_ACCESS_CHECK
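
For callers nothing changes: copy_from_user()/copy_to_user() keep returning the number of bytes that could not be copied, so a non-zero result maps to -EFAULT. A minimal, hypothetical caller (the demo_* names are made up for illustration):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_args {
	u32 flags;
	u32 len;
};

/* Hypothetical ioctl-style handler: copies a small struct from user space. */
static long demo_set_args(void __user *uptr)
{
	struct demo_args args;

	if (copy_from_user(&args, uptr, sizeof(args)))
		return -EFAULT;		/* some bytes were left uncopied */

	/* ... validate and use args.flags / args.len ... */
	return 0;
}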