
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle. This one
  mostly takes copy_from_user() and friends out of arch/* and gets the
  zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be a
  pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

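In practice, the RAW_COPY_USER conversion means an architecture now supplies only raw_copy_from_user()/raw_copy_to_user() (each returning the number of bytes left uncopied), while the access_ok() check and the zero-padding of the destination tail on a faulted read are done once in generic code. A simplified sketch of the shape of that generic wrapper — not the verbatim kernel source, which also has might_fault()/KASAN hooks and an inline/out-of-line split — looks like this:

	/* Sketch of the generic copy_from_user() after the series (simplified) */
	static inline unsigned long
	copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);	/* arch-provided primitive */
		if (unlikely(res))
			memset(to + (n - res), 0, res);		/* zero the uncopied tail */
		return res;
	}
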
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
Linus Torvalds, 8 years ago
Parent
Current commit
5db6db0d40
100 changed files with 899 additions and 2607 deletions. (Each entry in the list below gives the additions, the deletions, and then the file path.)
  1. 55 0
      arch/alpha/include/asm/extable.h
  2. 4 12
      arch/alpha/include/asm/futex.h
  3. 73 232
      arch/alpha/include/asm/uaccess.h
  4. 42 110
      arch/alpha/kernel/traps.c
  5. 26 40
      arch/alpha/lib/clear_user.S
  6. 34 48
      arch/alpha/lib/copy_user.S
  7. 2 8
      arch/alpha/lib/csum_partial_copy.c
  8. 35 49
      arch/alpha/lib/ev6-clear_user.S
  9. 45 59
      arch/alpha/lib/ev6-copy_user.S
  10. 1 0
      arch/arc/include/asm/Kbuild
  11. 7 18
      arch/arc/include/asm/uaccess.h
  12. 0 14
      arch/arc/mm/extable.c
  13. 0 1
      arch/arm/Kconfig
  14. 1 0
      arch/arm/include/asm/Kbuild
  15. 16 71
      arch/arm/include/asm/uaccess.h
  16. 2 2
      arch/arm/lib/uaccess_with_memcpy.c
  17. 0 1
      arch/arm64/Kconfig
  18. 25 0
      arch/arm64/include/asm/extable.h
  19. 6 77
      arch/arm64/include/asm/uaccess.h
  20. 1 1
      arch/arm64/kernel/arm64ksyms.c
  21. 2 2
      arch/arm64/lib/copy_in_user.S
  22. 1 0
      arch/avr32/include/asm/Kbuild
  23. 7 32
      arch/avr32/include/asm/uaccess.h
  24. 0 2
      arch/avr32/kernel/avr32_ksyms.c
  25. 0 15
      arch/avr32/lib/copy_user.S
  26. 1 0
      arch/blackfin/include/asm/Kbuild
  27. 5 42
      arch/blackfin/include/asm/uaccess.h
  28. 1 1
      arch/blackfin/kernel/process.c
  29. 1 0
      arch/c6x/include/asm/Kbuild
  30. 6 13
      arch/c6x/include/asm/uaccess.h
  31. 1 1
      arch/c6x/kernel/sys_c6x.c
  32. 9 22
      arch/cris/arch-v10/lib/usercopy.c
  33. 9 21
      arch/cris/arch-v32/lib/usercopy.c
  34. 18 28
      arch/cris/include/arch-v10/arch/uaccess.h
  35. 18 36
      arch/cris/include/arch-v32/arch/uaccess.h
  36. 1 0
      arch/cris/include/asm/Kbuild
  37. 12 65
      arch/cris/include/asm/uaccess.h
  38. 1 0
      arch/frv/include/asm/Kbuild
  39. 24 60
      arch/frv/include/asm/uaccess.h
  40. 1 6
      arch/frv/kernel/traps.c
  41. 13 14
      arch/frv/mm/extable.c
  42. 2 4
      arch/frv/mm/fault.c
  43. 1 1
      arch/h8300/include/asm/Kbuild
  44. 54 0
      arch/h8300/include/asm/uaccess.h
  45. 1 0
      arch/hexagon/include/asm/Kbuild
  46. 10 16
      arch/hexagon/include/asm/uaccess.h
  47. 2 2
      arch/hexagon/kernel/hexagon_ksyms.c
  48. 1 1
      arch/hexagon/mm/copy_from_user.S
  49. 1 1
      arch/hexagon/mm/copy_to_user.S
  50. 0 1
      arch/ia64/Kconfig
  51. 11 0
      arch/ia64/include/asm/extable.h
  52. 27 75
      arch/ia64/include/asm/uaccess.h
  53. 1 12
      arch/ia64/lib/memcpy_mck.S
  54. 4 1
      arch/ia64/mm/extable.c
  55. 1 0
      arch/m32r/include/asm/Kbuild
  56. 9 180
      arch/m32r/include/asm/uaccess.h
  57. 0 2
      arch/m32r/kernel/m32r_ksyms.c
  58. 0 21
      arch/m32r/lib/usercopy.c
  59. 1 0
      arch/m68k/include/asm/Kbuild
  60. 0 10
      arch/m68k/include/asm/processor.h
  61. 1 0
      arch/m68k/include/asm/uaccess.h
  62. 49 54
      arch/m68k/include/asm/uaccess_mm.h
  63. 14 29
      arch/m68k/include/asm/uaccess_no.h
  64. 1 1
      arch/m68k/kernel/signal.c
  65. 7 2
      arch/m68k/kernel/traps.c
  66. 3 9
      arch/m68k/lib/uaccess.c
  67. 1 1
      arch/m68k/mm/fault.c
  68. 1 0
      arch/metag/include/asm/Kbuild
  69. 4 56
      arch/metag/include/asm/uaccess.h
  70. 3 3
      arch/metag/lib/usercopy.c
  71. 1 0
      arch/microblaze/include/asm/Kbuild
  72. 9 55
      arch/microblaze/include/asm/uaccess.h
  73. 0 1
      arch/mips/Kconfig
  74. 1 30
      arch/mips/cavium-octeon/octeon-memcpy.S
  75. 2 2
      arch/mips/include/asm/checksum.h
  76. 2 2
      arch/mips/include/asm/r4kcache.h
  77. 60 389
      arch/mips/include/asm/uaccess.h
  78. 12 12
      arch/mips/kernel/mips-r2-to-r6-emul.c
  79. 1 1
      arch/mips/kernel/syscall.c
  80. 5 5
      arch/mips/kernel/unaligned.c
  81. 0 49
      arch/mips/lib/memcpy.S
  82. 1 1
      arch/mips/oprofile/backtrace.c
  83. 1 0
      arch/mn10300/include/asm/Kbuild
  84. 6 181
      arch/mn10300/include/asm/uaccess.h
  85. 0 2
      arch/mn10300/kernel/mn10300_ksyms.c
  86. 0 18
      arch/mn10300/lib/usercopy.c
  87. 1 0
      arch/nios2/include/asm/Kbuild
  88. 7 48
      arch/nios2/include/asm/uaccess.h
  89. 8 8
      arch/nios2/mm/uaccess.c
  90. 1 0
      arch/openrisc/include/asm/Kbuild
  91. 8 45
      arch/openrisc/include/asm/uaccess.h
  92. 0 1
      arch/parisc/Kconfig
  93. 1 1
      arch/parisc/include/asm/futex.h
  94. 8 61
      arch/parisc/include/asm/uaccess.h
  95. 8 8
      arch/parisc/lib/memcpy.c
  96. 0 1
      arch/powerpc/Kconfig
  97. 29 0
      arch/powerpc/include/asm/extable.h
  98. 10 86
      arch/powerpc/include/asm/uaccess.h
  99. 1 1
      arch/powerpc/lib/Makefile
  100. 0 14
      arch/powerpc/lib/copy_32.S

+ 55 - 0
arch/alpha/include/asm/extable.h

@@ -0,0 +1,55 @@
+#ifndef _ASM_EXTABLE_H
+#define _ASM_EXTABLE_H
+
+/*
+ * About the exception table:
+ *
+ * - insn is a 32-bit pc-relative offset from the faulting insn.
+ * - nextinsn is a 16-bit offset off of the faulting instruction
+ *   (not off of the *next* instruction as branches are).
+ * - errreg is the register in which to place -EFAULT.
+ * - valreg is the final target register for the load sequence
+ *   and will be zeroed.
+ *
+ * Either errreg or valreg may be $31, in which case nothing happens.
+ *
+ * The exception fixup information "just so happens" to be arranged
+ * as in a MEM format instruction.  This lets us emit our three
+ * values like so:
+ *
+ *      lda valreg, nextinsn(errreg)
+ *
+ */
+
+struct exception_table_entry
+{
+	signed int insn;
+	union exception_fixup {
+		unsigned unit;
+		struct {
+			signed int nextinsn : 16;
+			unsigned int errreg : 5;
+			unsigned int valreg : 5;
+		} bits;
+	} fixup;
+};
+
+/* Returns the new pc */
+#define fixup_exception(map_reg, _fixup, pc)			\
+({								\
+	if ((_fixup)->fixup.bits.valreg != 31)			\
+		map_reg((_fixup)->fixup.bits.valreg) = 0;	\
+	if ((_fixup)->fixup.bits.errreg != 31)			\
+		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT;	\
+	(pc) + (_fixup)->fixup.bits.nextinsn;			\
+})
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#define swap_ex_entry_fixup(a, b, tmp, delta)			\
+	do {							\
+		(a)->fixup.unit = (b)->fixup.unit;		\
+		(b)->fixup.unit = (tmp).fixup.unit;		\
+	} while (0)
+
+#endif
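
As the comment above notes, each fixup entry is packed like a MEM-format instruction, and fixup_exception() returns the PC at which execution should resume. For context (this hunk is not part of the diff shown here), the Alpha fault path consumes these entries roughly as follows; dpf_reg is the arch helper that maps a register number onto the saved register in the trap frame:

	/* Rough sketch of the consumer in arch/alpha/mm/fault.c (simplified) */
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		unsigned long newpc;

		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		regs->pc = newpc;	/* resume at nextinsn; 0/-EFAULT already placed */
		return;
	}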

+ 4 - 12
arch/alpha/include/asm/futex.h

@@ -19,12 +19,8 @@
 	"3:	.subsection 2\n"				\
 	"4:	br	1b\n"					\
 	"	.previous\n"					\
-	"	.section __ex_table,\"a\"\n"			\
-	"	.long	1b-.\n"					\
-	"	lda	$31,3b-1b(%1)\n"			\
-	"	.long	2b-.\n"					\
-	"	lda	$31,3b-2b(%1)\n"			\
-	"	.previous\n"					\
+	EXC(1b,3b,%1,$31)					\
+	EXC(2b,3b,%1,$31)					\
 	:	"=&r" (oldval), "=&r"(ret)			\
 	:	"r" (uaddr), "r"(oparg)				\
 	:	"memory")
@@ -101,12 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	"3:	.subsection 2\n"
 	"4:	br	1b\n"
 	"	.previous\n"
-	"	.section __ex_table,\"a\"\n"
-	"	.long	1b-.\n"
-	"	lda	$31,3b-1b(%0)\n"
-	"	.long	2b-.\n"
-	"	lda	$31,3b-2b(%0)\n"
-	"	.previous\n"
+	EXC(1b,3b,%0,$31)
+	EXC(2b,3b,%0,$31)
 	:	"+r"(ret), "=&r"(prev), "=&r"(cmp)
 	:	"r"(uaddr), "r"((long)(int)oldval), "r"(newval)
 	:	"memory");

+ 73 - 232
arch/alpha/include/asm/uaccess.h

@@ -1,10 +1,6 @@
 #ifndef __ALPHA_UACCESS_H
 #ifndef __ALPHA_UACCESS_H
 #define __ALPHA_UACCESS_H
 #define __ALPHA_UACCESS_H
 
 
-#include <linux/errno.h>
-#include <linux/sched.h>
-
-
 /*
 /*
  * The fs value determines whether argument validity checking should be
  * The fs value determines whether argument validity checking should be
  * performed or not.  If get_fs() == USER_DS, checking is performed, with
  * performed or not.  If get_fs() == USER_DS, checking is performed, with
@@ -20,9 +16,6 @@
 #define KERNEL_DS	((mm_segment_t) { 0UL })
 #define KERNEL_DS	((mm_segment_t) { 0UL })
 #define USER_DS		((mm_segment_t) { -0x40000000000UL })
 #define USER_DS		((mm_segment_t) { -0x40000000000UL })
 
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 #define get_fs()  (current_thread_info()->addr_limit)
 #define get_fs()  (current_thread_info()->addr_limit)
 #define get_ds()  (KERNEL_DS)
 #define get_ds()  (KERNEL_DS)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
@@ -39,13 +32,13 @@
  *  - AND "addr+size" doesn't have any high-bits set
  *  - AND "addr+size" doesn't have any high-bits set
  *  - OR we are in kernel mode.
  *  - OR we are in kernel mode.
  */
  */
-#define __access_ok(addr, size, segment) \
-	(((segment).seg & (addr | size | (addr+size))) == 0)
+#define __access_ok(addr, size) \
+	((get_fs().seg & (addr | size | (addr+size))) == 0)
 
 
-#define access_ok(type, addr, size)				\
-({								\
-	__chk_user_ptr(addr);					\
-	__access_ok(((unsigned long)(addr)), (size), get_fs());	\
+#define access_ok(type, addr, size)			\
+({							\
+	__chk_user_ptr(addr);				\
+	__access_ok(((unsigned long)(addr)), (size));	\
 })
 })
 
 
 /*
 /*
@@ -61,9 +54,9 @@
  * (b) require any knowledge of processes at this stage
  * (b) require any knowledge of processes at this stage
  */
  */
 #define put_user(x, ptr) \
 #define put_user(x, ptr) \
-  __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
+  __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 #define get_user(x, ptr) \
 #define get_user(x, ptr) \
-  __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
+  __get_user_check((x), (ptr), sizeof(*(ptr)))
 
 
 /*
 /*
  * The "__xxx" versions do not do address space checking, useful when
  * The "__xxx" versions do not do address space checking, useful when
@@ -81,6 +74,11 @@
  * more extensive comments with fixup_inline_exception below for
  * more information.
  */
+#define EXC(label,cont,res,err)				\
+	".section __ex_table,\"a\"\n"			\
+	"	.long "#label"-.\n"			\
+	"	lda "#res","#cont"-"#label"("#err")\n"	\
+	".previous\n"
 
 extern void __get_user_unknown(void);
 
@@ -100,23 +98,23 @@ extern void __get_user_unknown(void);
 	__gu_err;						\
 	__gu_err;						\
 })
 })
 
 
-#define __get_user_check(x, ptr, size, segment)				\
-({									\
-	long __gu_err = -EFAULT;					\
-	unsigned long __gu_val = 0;					\
-	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	if (__access_ok((unsigned long)__gu_addr, size, segment)) {	\
-		__gu_err = 0;						\
-		switch (size) {						\
-		  case 1: __get_user_8(__gu_addr); break;		\
-		  case 2: __get_user_16(__gu_addr); break;		\
-		  case 4: __get_user_32(__gu_addr); break;		\
-		  case 8: __get_user_64(__gu_addr); break;		\
-		  default: __get_user_unknown(); break;			\
-		}							\
-	}								\
-	(x) = (__force __typeof__(*(ptr))) __gu_val;			\
-	__gu_err;							\
+#define __get_user_check(x, ptr, size)				\
+({								\
+	long __gu_err = -EFAULT;				\
+	unsigned long __gu_val = 0;				\
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
+	if (__access_ok((unsigned long)__gu_addr, size)) {	\
+		__gu_err = 0;					\
+		switch (size) {					\
+		  case 1: __get_user_8(__gu_addr); break;	\
+		  case 2: __get_user_16(__gu_addr); break;	\
+		  case 4: __get_user_32(__gu_addr); break;	\
+		  case 8: __get_user_64(__gu_addr); break;	\
+		  default: __get_user_unknown(); break;		\
+		}						\
+	}							\
+	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
+	__gu_err;						\
 })
 })
 
 
 struct __large_struct { unsigned long buf[100]; };
 struct __large_struct { unsigned long buf[100]; };
@@ -125,20 +123,14 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_64(addr)				\
 #define __get_user_64(addr)				\
 	__asm__("1: ldq %0,%2\n"			\
 	__asm__("1: ldq %0,%2\n"			\
 	"2:\n"						\
 	"2:\n"						\
-	".section __ex_table,\"a\"\n"			\
-	"	.long 1b - .\n"				\
-	"	lda %0, 2b-1b(%1)\n"			\
-	".previous"					\
+	EXC(1b,2b,%0,%1)				\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "m"(__m(addr)), "1"(__gu_err))
 		: "m"(__m(addr)), "1"(__gu_err))
 
 
 #define __get_user_32(addr)				\
 #define __get_user_32(addr)				\
 	__asm__("1: ldl %0,%2\n"			\
 	__asm__("1: ldl %0,%2\n"			\
 	"2:\n"						\
 	"2:\n"						\
-	".section __ex_table,\"a\"\n"			\
-	"	.long 1b - .\n"				\
-	"	lda %0, 2b-1b(%1)\n"			\
-	".previous"					\
+	EXC(1b,2b,%0,%1)				\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "m"(__m(addr)), "1"(__gu_err))
 		: "m"(__m(addr)), "1"(__gu_err))
 
 
@@ -148,20 +140,14 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_16(addr)				\
 #define __get_user_16(addr)				\
 	__asm__("1: ldwu %0,%2\n"			\
 	__asm__("1: ldwu %0,%2\n"			\
 	"2:\n"						\
 	"2:\n"						\
-	".section __ex_table,\"a\"\n"			\
-	"	.long 1b - .\n"				\
-	"	lda %0, 2b-1b(%1)\n"			\
-	".previous"					\
+	EXC(1b,2b,%0,%1)				\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "m"(__m(addr)), "1"(__gu_err))
 		: "m"(__m(addr)), "1"(__gu_err))
 
 
 #define __get_user_8(addr)				\
 #define __get_user_8(addr)				\
 	__asm__("1: ldbu %0,%2\n"			\
 	__asm__("1: ldbu %0,%2\n"			\
 	"2:\n"						\
 	"2:\n"						\
-	".section __ex_table,\"a\"\n"			\
-	"	.long 1b - .\n"				\
-	"	lda %0, 2b-1b(%1)\n"			\
-	".previous"					\
+	EXC(1b,2b,%0,%1)				\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
 		: "m"(__m(addr)), "1"(__gu_err))
 		: "m"(__m(addr)), "1"(__gu_err))
 #else
 #else
@@ -177,12 +163,8 @@ struct __large_struct { unsigned long buf[100]; };
 	"	extwh %1,%3,%1\n"					\
 	"	extwh %1,%3,%1\n"					\
 	"	or %0,%1,%0\n"						\
 	"	or %0,%1,%0\n"						\
 	"3:\n"								\
 	"3:\n"								\
-	".section __ex_table,\"a\"\n"					\
-	"	.long 1b - .\n"						\
-	"	lda %0, 3b-1b(%2)\n"					\
-	"	.long 2b - .\n"						\
-	"	lda %0, 3b-2b(%2)\n"					\
-	".previous"							\
+	EXC(1b,3b,%0,%2)						\
+	EXC(2b,3b,%0,%2)						\
 		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
 		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
 		: "r"(addr), "2"(__gu_err));				\
 		: "r"(addr), "2"(__gu_err));				\
 }
 }
@@ -191,10 +173,7 @@ struct __large_struct { unsigned long buf[100]; };
 	__asm__("1: ldq_u %0,0(%2)\n"					\
 	__asm__("1: ldq_u %0,0(%2)\n"					\
 	"	extbl %0,%2,%0\n"					\
 	"	extbl %0,%2,%0\n"					\
 	"2:\n"								\
 	"2:\n"								\
-	".section __ex_table,\"a\"\n"					\
-	"	.long 1b - .\n"						\
-	"	lda %0, 2b-1b(%1)\n"					\
-	".previous"							\
+	EXC(1b,2b,%0,%1)						\
 		: "=&r"(__gu_val), "=r"(__gu_err)			\
 		: "=&r"(__gu_val), "=r"(__gu_err)			\
 		: "r"(addr), "1"(__gu_err))
 		: "r"(addr), "1"(__gu_err))
 #endif
 #endif
@@ -215,21 +194,21 @@ extern void __put_user_unknown(void);
 	__pu_err;						\
 	__pu_err;						\
 })
 })
 
 
-#define __put_user_check(x, ptr, size, segment)				\
-({									\
-	long __pu_err = -EFAULT;					\
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
-	if (__access_ok((unsigned long)__pu_addr, size, segment)) {	\
-		__pu_err = 0;						\
-		switch (size) {						\
-		  case 1: __put_user_8(x, __pu_addr); break;		\
-		  case 2: __put_user_16(x, __pu_addr); break;		\
-		  case 4: __put_user_32(x, __pu_addr); break;		\
-		  case 8: __put_user_64(x, __pu_addr); break;		\
-		  default: __put_user_unknown(); break;			\
-		}							\
-	}								\
-	__pu_err;							\
+#define __put_user_check(x, ptr, size)				\
+({								\
+	long __pu_err = -EFAULT;				\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	if (__access_ok((unsigned long)__pu_addr, size)) {	\
+		__pu_err = 0;					\
+		switch (size) {					\
+		  case 1: __put_user_8(x, __pu_addr); break;	\
+		  case 2: __put_user_16(x, __pu_addr); break;	\
+		  case 4: __put_user_32(x, __pu_addr); break;	\
+		  case 8: __put_user_64(x, __pu_addr); break;	\
+		  default: __put_user_unknown(); break;		\
+		}						\
+	}							\
+	__pu_err;						\
 })
 })
 
 
 /*
 /*
@@ -240,20 +219,14 @@ extern void __put_user_unknown(void);
 #define __put_user_64(x, addr)					\
 #define __put_user_64(x, addr)					\
 __asm__ __volatile__("1: stq %r2,%1\n"				\
 __asm__ __volatile__("1: stq %r2,%1\n"				\
 	"2:\n"							\
 	"2:\n"							\
-	".section __ex_table,\"a\"\n"				\
-	"	.long 1b - .\n"					\
-	"	lda $31,2b-1b(%0)\n"				\
-	".previous"						\
+	EXC(1b,2b,$31,%0)					\
 		: "=r"(__pu_err)				\
 		: "=r"(__pu_err)				\
 		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
 		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
 
 
 #define __put_user_32(x, addr)					\
 #define __put_user_32(x, addr)					\
 __asm__ __volatile__("1: stl %r2,%1\n"				\
 __asm__ __volatile__("1: stl %r2,%1\n"				\
 	"2:\n"							\
 	"2:\n"							\
-	".section __ex_table,\"a\"\n"				\
-	"	.long 1b - .\n"					\
-	"	lda $31,2b-1b(%0)\n"				\
-	".previous"						\
+	EXC(1b,2b,$31,%0)					\
 		: "=r"(__pu_err)				\
 		: "=r"(__pu_err)				\
 		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 
 
@@ -263,20 +236,14 @@ __asm__ __volatile__("1: stl %r2,%1\n"				\
 #define __put_user_16(x, addr)					\
 #define __put_user_16(x, addr)					\
 __asm__ __volatile__("1: stw %r2,%1\n"				\
 __asm__ __volatile__("1: stw %r2,%1\n"				\
 	"2:\n"							\
 	"2:\n"							\
-	".section __ex_table,\"a\"\n"				\
-	"	.long 1b - .\n"					\
-	"	lda $31,2b-1b(%0)\n"				\
-	".previous"						\
+	EXC(1b,2b,$31,%0)					\
 		: "=r"(__pu_err)				\
 		: "=r"(__pu_err)				\
 		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 
 
 #define __put_user_8(x, addr)					\
 #define __put_user_8(x, addr)					\
 __asm__ __volatile__("1: stb %r2,%1\n"				\
 __asm__ __volatile__("1: stb %r2,%1\n"				\
 	"2:\n"							\
 	"2:\n"							\
-	".section __ex_table,\"a\"\n"				\
-	"	.long 1b - .\n"					\
-	"	lda $31,2b-1b(%0)\n"				\
-	".previous"						\
+	EXC(1b,2b,$31,%0)					\
 		: "=r"(__pu_err)				\
 		: "=r"(__pu_err)				\
 		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 #else
 #else
@@ -298,16 +265,10 @@ __asm__ __volatile__("1: stb %r2,%1\n"				\
 	"3:	stq_u %2,1(%5)\n"				\
 	"3:	stq_u %2,1(%5)\n"				\
 	"4:	stq_u %1,0(%5)\n"				\
 	"4:	stq_u %1,0(%5)\n"				\
 	"5:\n"							\
 	"5:\n"							\
-	".section __ex_table,\"a\"\n"				\
-	"	.long 1b - .\n"					\
-	"	lda $31, 5b-1b(%0)\n"				\
-	"	.long 2b - .\n"					\
-	"	lda $31, 5b-2b(%0)\n"				\
-	"	.long 3b - .\n"					\
-	"	lda $31, 5b-3b(%0)\n"				\
-	"	.long 4b - .\n"					\
-	"	lda $31, 5b-4b(%0)\n"				\
-	".previous"						\
+	EXC(1b,5b,$31,%0)					\
+	EXC(2b,5b,$31,%0)					\
+	EXC(3b,5b,$31,%0)					\
+	EXC(4b,5b,$31,%0)					\
 		: "=r"(__pu_err), "=&r"(__pu_tmp1), 		\
 		: "=r"(__pu_err), "=&r"(__pu_tmp1), 		\
 		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), 		\
 		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), 		\
 		  "=&r"(__pu_tmp4)				\
 		  "=&r"(__pu_tmp4)				\
@@ -324,12 +285,8 @@ __asm__ __volatile__("1: stb %r2,%1\n"				\
 	"	or %1,%2,%1\n"					\
 	"	or %1,%2,%1\n"					\
 	"2:	stq_u %1,0(%4)\n"				\
 	"2:	stq_u %1,0(%4)\n"				\
 	"3:\n"							\
 	"3:\n"							\
-	".section __ex_table,\"a\"\n"				\
-	"	.long 1b - .\n"					\
-	"	lda $31, 3b-1b(%0)\n"				\
-	"	.long 2b - .\n"					\
-	"	lda $31, 3b-2b(%0)\n"				\
-	".previous"						\
+	EXC(1b,3b,$31,%0)					\
+	EXC(2b,3b,$31,%0)					\
 		: "=r"(__pu_err), 				\
 		: "=r"(__pu_err), 				\
 	  	  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
 	  	  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
 		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
 		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
@@ -341,153 +298,37 @@ __asm__ __volatile__("1: stb %r2,%1\n"				\
  * Complex access routines
  * Complex access routines
  */
  */
 
 
-/* This little bit of silliness is to get the GP loaded for a function
-   that ordinarily wouldn't.  Otherwise we could have it done by the macro
-   directly, which can be optimized the linker.  */
-#ifdef MODULE
-#define __module_address(sym)		"r"(sym),
-#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
-#else
-#define __module_address(sym)
-#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
-#endif
-
-extern void __copy_user(void);
-
-extern inline long
-__copy_tofrom_user_nocheck(void *to, const void *from, long len)
-{
-	register void * __cu_to __asm__("$6") = to;
-	register const void * __cu_from __asm__("$7") = from;
-	register long __cu_len __asm__("$0") = len;
-
-	__asm__ __volatile__(
-		__module_call(28, 3, __copy_user)
-		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
-		: __module_address(__copy_user)
-		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
-		: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
-
-	return __cu_len;
-}
-
-#define __copy_to_user(to, from, n)					\
-({									\
-	__chk_user_ptr(to);						\
-	__copy_tofrom_user_nocheck((__force void *)(to), (from), (n));	\
-})
-#define __copy_from_user(to, from, n)					\
-({									\
-	__chk_user_ptr(from);						\
-	__copy_tofrom_user_nocheck((to), (__force void *)(from), (n));	\
-})
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+extern long __copy_user(void *to, const void *from, long len);
 
-extern inline long
-copy_to_user(void __user *to, const void *from, long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long len)
 {
-	if (likely(__access_ok((unsigned long)to, n, get_fs())))
-		n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
-	return n;
+	return __copy_user(to, (__force const void *)from, len);
 }
 
-extern inline long
-copy_from_user(void *to, const void __user *from, long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long len)
 {
-	long res = n;
-	if (likely(__access_ok((unsigned long)from, n, get_fs())))
-		res = __copy_from_user_inatomic(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
+	return __copy_user((__force void *)to, from, len);
 }
 
 
-extern void __do_clear_user(void);
-
-extern inline long
-__clear_user(void __user *to, long len)
-{
-	register void __user * __cl_to __asm__("$6") = to;
-	register long __cl_len __asm__("$0") = len;
-	__asm__ __volatile__(
-		__module_call(28, 2, __do_clear_user)
-		: "=r"(__cl_len), "=r"(__cl_to)
-		: __module_address(__do_clear_user)
-		  "0"(__cl_len), "1"(__cl_to)
-		: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
-	return __cl_len;
-}
+extern long __clear_user(void __user *to, long len);
 
 
 extern inline long
 extern inline long
 clear_user(void __user *to, long len)
 clear_user(void __user *to, long len)
 {
 {
-	if (__access_ok((unsigned long)to, len, get_fs()))
+	if (__access_ok((unsigned long)to, len))
 		len = __clear_user(to, len);
 		len = __clear_user(to, len);
 	return len;
 	return len;
 }
 }
 
 
-#undef __module_address
-#undef __module_call
-
 #define user_addr_max() \
 #define user_addr_max() \
-        (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+        (uaccess_kernel() ? ~0UL : TASK_SIZE)
 
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
 
-/*
- * About the exception table:
- *
- * - insn is a 32-bit pc-relative offset from the faulting insn.
- * - nextinsn is a 16-bit offset off of the faulting instruction
- *   (not off of the *next* instruction as branches are).
- * - errreg is the register in which to place -EFAULT.
- * - valreg is the final target register for the load sequence
- *   and will be zeroed.
- *
- * Either errreg or valreg may be $31, in which case nothing happens.
- *
- * The exception fixup information "just so happens" to be arranged
- * as in a MEM format instruction.  This lets us emit our three
- * values like so:
- *
- *      lda valreg, nextinsn(errreg)
- *
- */
-
-struct exception_table_entry
-{
-	signed int insn;
-	union exception_fixup {
-		unsigned unit;
-		struct {
-			signed int nextinsn : 16;
-			unsigned int errreg : 5;
-			unsigned int valreg : 5;
-		} bits;
-	} fixup;
-};
-
-/* Returns the new pc */
-#define fixup_exception(map_reg, _fixup, pc)			\
-({								\
-	if ((_fixup)->fixup.bits.valreg != 31)			\
-		map_reg((_fixup)->fixup.bits.valreg) = 0;	\
-	if ((_fixup)->fixup.bits.errreg != 31)			\
-		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT;	\
-	(pc) + (_fixup)->fixup.bits.nextinsn;			\
-})
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-#define swap_ex_entry_fixup(a, b, tmp, delta)			\
-	do {							\
-		(a)->fixup.unit = (b)->fixup.unit;		\
-		(b)->fixup.unit = (tmp).fixup.unit;		\
-	} while (0)
-
+#include <asm/extable.h>
 
 
 #endif /* __ALPHA_UACCESS_H */
 #endif /* __ALPHA_UACCESS_H */

+ 42 - 110
arch/alpha/kernel/traps.c

@@ -482,12 +482,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"	extwl %1,%3,%1\n"
 		"	extwl %1,%3,%1\n"
 		"	extwh %2,%3,%2\n"
 		"	extwh %2,%3,%2\n"
 		"3:\n"
 		"3:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %1,3b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %2,3b-2b(%0)\n"
-		".previous"
+		EXC(1b,3b,%1,%0)
+		EXC(2b,3b,%2,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "r"(va), "0"(0));
 			: "r"(va), "0"(0));
 		if (error)
 		if (error)
@@ -502,12 +498,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"	extll %1,%3,%1\n"
 		"	extll %1,%3,%1\n"
 		"	extlh %2,%3,%2\n"
 		"	extlh %2,%3,%2\n"
 		"3:\n"
 		"3:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %1,3b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %2,3b-2b(%0)\n"
-		".previous"
+		EXC(1b,3b,%1,%0)
+		EXC(2b,3b,%2,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "r"(va), "0"(0));
 			: "r"(va), "0"(0));
 		if (error)
 		if (error)
@@ -522,12 +514,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"	extql %1,%3,%1\n"
 		"	extql %1,%3,%1\n"
 		"	extqh %2,%3,%2\n"
 		"	extqh %2,%3,%2\n"
 		"3:\n"
 		"3:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %1,3b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %2,3b-2b(%0)\n"
-		".previous"
+		EXC(1b,3b,%1,%0)
+		EXC(2b,3b,%2,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "r"(va), "0"(0));
 			: "r"(va), "0"(0));
 		if (error)
 		if (error)
@@ -551,16 +539,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"3:	stq_u %2,1(%5)\n"
 		"3:	stq_u %2,1(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		"5:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %2,5b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %1,5b-2b(%0)\n"
-		"	.long 3b - .\n"
-		"	lda $31,5b-3b(%0)\n"
-		"	.long 4b - .\n"
-		"	lda $31,5b-4b(%0)\n"
-		".previous"
+		EXC(1b,5b,%2,%0)
+		EXC(2b,5b,%1,%0)
+		EXC(3b,5b,$31,%0)
+		EXC(4b,5b,$31,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			  "=&r"(tmp3), "=&r"(tmp4)
 			  "=&r"(tmp3), "=&r"(tmp4)
 			: "r"(va), "r"(una_reg(reg)), "0"(0));
 			: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -581,16 +563,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"3:	stq_u %2,3(%5)\n"
 		"3:	stq_u %2,3(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		"5:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %2,5b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %1,5b-2b(%0)\n"
-		"	.long 3b - .\n"
-		"	lda $31,5b-3b(%0)\n"
-		"	.long 4b - .\n"
-		"	lda $31,5b-4b(%0)\n"
-		".previous"
+		EXC(1b,5b,%2,%0)
+		EXC(2b,5b,%1,%0)
+		EXC(3b,5b,$31,%0)
+		EXC(4b,5b,$31,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			  "=&r"(tmp3), "=&r"(tmp4)
 			  "=&r"(tmp3), "=&r"(tmp4)
 			: "r"(va), "r"(una_reg(reg)), "0"(0));
 			: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -611,16 +587,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
 		"3:	stq_u %2,7(%5)\n"
 		"3:	stq_u %2,7(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		"5:\n"
-		".section __ex_table,\"a\"\n\t"
-		"	.long 1b - .\n"
-		"	lda %2,5b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %1,5b-2b(%0)\n"
-		"	.long 3b - .\n"
-		"	lda $31,5b-3b(%0)\n"
-		"	.long 4b - .\n"
-		"	lda $31,5b-4b(%0)\n"
-		".previous"
+		EXC(1b,5b,%2,%0)
+		EXC(2b,5b,%1,%0)
+		EXC(3b,5b,$31,%0)
+		EXC(4b,5b,$31,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			  "=&r"(tmp3), "=&r"(tmp4)
 			  "=&r"(tmp3), "=&r"(tmp4)
 			: "r"(va), "r"(una_reg(reg)), "0"(0));
 			: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -802,7 +772,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 	/* Don't bother reading ds in the access check since we already
 	/* Don't bother reading ds in the access check since we already
 	   know that this came from the user.  Also rely on the fact that
 	   know that this came from the user.  Also rely on the fact that
 	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
 	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
-	if (!__access_ok((unsigned long)va, 0, USER_DS))
+	if ((unsigned long)va >= TASK_SIZE)
 		goto give_sigsegv;
 		goto give_sigsegv;
 
 
 	++unaligned[1].count;
 	++unaligned[1].count;
@@ -835,12 +805,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		"	extwl %1,%3,%1\n"
 		"	extwl %1,%3,%1\n"
 		"	extwh %2,%3,%2\n"
 		"	extwh %2,%3,%2\n"
 		"3:\n"
 		"3:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %1,3b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %2,3b-2b(%0)\n"
-		".previous"
+		EXC(1b,3b,%1,%0)
+		EXC(2b,3b,%2,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "r"(va), "0"(0));
 			: "r"(va), "0"(0));
 		if (error)
 		if (error)
@@ -855,12 +821,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		"	extll %1,%3,%1\n"
 		"	extll %1,%3,%1\n"
 		"	extlh %2,%3,%2\n"
 		"	extlh %2,%3,%2\n"
 		"3:\n"
 		"3:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %1,3b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %2,3b-2b(%0)\n"
-		".previous"
+		EXC(1b,3b,%1,%0)
+		EXC(2b,3b,%2,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "r"(va), "0"(0));
 			: "r"(va), "0"(0));
 		if (error)
 		if (error)
@@ -875,12 +837,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		"	extql %1,%3,%1\n"
 		"	extql %1,%3,%1\n"
 		"	extqh %2,%3,%2\n"
 		"	extqh %2,%3,%2\n"
 		"3:\n"
 		"3:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %1,3b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %2,3b-2b(%0)\n"
-		".previous"
+		EXC(1b,3b,%1,%0)
+		EXC(2b,3b,%2,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "r"(va), "0"(0));
 			: "r"(va), "0"(0));
 		if (error)
 		if (error)
@@ -895,12 +853,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		"	extll %1,%3,%1\n"
 		"	extll %1,%3,%1\n"
 		"	extlh %2,%3,%2\n"
 		"	extlh %2,%3,%2\n"
 		"3:\n"
 		"3:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %1,3b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %2,3b-2b(%0)\n"
-		".previous"
+		EXC(1b,3b,%1,%0)
+		EXC(2b,3b,%2,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "r"(va), "0"(0));
 			: "r"(va), "0"(0));
 		if (error)
 		if (error)
@@ -915,12 +869,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		"	extql %1,%3,%1\n"
 		"	extql %1,%3,%1\n"
 		"	extqh %2,%3,%2\n"
 		"	extqh %2,%3,%2\n"
 		"3:\n"
 		"3:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %1,3b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %2,3b-2b(%0)\n"
-		".previous"
+		EXC(1b,3b,%1,%0)
+		EXC(2b,3b,%2,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 			: "r"(va), "0"(0));
 			: "r"(va), "0"(0));
 		if (error)
 		if (error)
@@ -944,16 +894,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		"3:	stq_u %2,1(%5)\n"
 		"3:	stq_u %2,1(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		"5:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %2,5b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %1,5b-2b(%0)\n"
-		"	.long 3b - .\n"
-		"	lda $31,5b-3b(%0)\n"
-		"	.long 4b - .\n"
-		"	lda $31,5b-4b(%0)\n"
-		".previous"
+		EXC(1b,5b,%2,%0)
+		EXC(2b,5b,%1,%0)
+		EXC(3b,5b,$31,%0)
+		EXC(4b,5b,$31,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			  "=&r"(tmp3), "=&r"(tmp4)
 			  "=&r"(tmp3), "=&r"(tmp4)
 			: "r"(va), "r"(*reg_addr), "0"(0));
 			: "r"(va), "r"(*reg_addr), "0"(0));
@@ -978,16 +922,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		"3:	stq_u %2,3(%5)\n"
 		"3:	stq_u %2,3(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		"5:\n"
-		".section __ex_table,\"a\"\n"
-		"	.long 1b - .\n"
-		"	lda %2,5b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %1,5b-2b(%0)\n"
-		"	.long 3b - .\n"
-		"	lda $31,5b-3b(%0)\n"
-		"	.long 4b - .\n"
-		"	lda $31,5b-4b(%0)\n"
-		".previous"
+		EXC(1b,5b,%2,%0)
+		EXC(2b,5b,%1,%0)
+		EXC(3b,5b,$31,%0)
+		EXC(4b,5b,$31,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			  "=&r"(tmp3), "=&r"(tmp4)
 			  "=&r"(tmp3), "=&r"(tmp4)
 			: "r"(va), "r"(*reg_addr), "0"(0));
 			: "r"(va), "r"(*reg_addr), "0"(0));
@@ -1012,16 +950,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 		"3:	stq_u %2,7(%5)\n"
 		"3:	stq_u %2,7(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"4:	stq_u %1,0(%5)\n"
 		"5:\n"
 		"5:\n"
-		".section __ex_table,\"a\"\n\t"
-		"	.long 1b - .\n"
-		"	lda %2,5b-1b(%0)\n"
-		"	.long 2b - .\n"
-		"	lda %1,5b-2b(%0)\n"
-		"	.long 3b - .\n"
-		"	lda $31,5b-3b(%0)\n"
-		"	.long 4b - .\n"
-		"	lda $31,5b-4b(%0)\n"
-		".previous"
+		EXC(1b,5b,%2,%0)
+		EXC(2b,5b,%1,%0)
+		EXC(3b,5b,$31,%0)
+		EXC(4b,5b,$31,%0)
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 			  "=&r"(tmp3), "=&r"(tmp4)
 			  "=&r"(tmp3), "=&r"(tmp4)
 			: "r"(va), "r"(*reg_addr), "0"(0));
 			: "r"(va), "r"(*reg_addr), "0"(0));
@@ -1047,7 +979,7 @@ give_sigsegv:
 	/* We need to replicate some of the logic in mm/fault.c,
 	/* We need to replicate some of the logic in mm/fault.c,
 	   since we don't have access to the fault code in the
 	   since we don't have access to the fault code in the
 	   exception handling return path.  */
 	   exception handling return path.  */
-	if (!__access_ok((unsigned long)va, 0, USER_DS))
+	if ((unsigned long)va >= TASK_SIZE)
 		info.si_code = SEGV_ACCERR;
 		info.si_code = SEGV_ACCERR;
 	else {
 	else {
 		struct mm_struct *mm = current->mm;
 		struct mm_struct *mm = current->mm;

+ 26 - 40
arch/alpha/lib/clear_user.S

@@ -8,21 +8,6 @@
  * right "bytes left to zero" value (and that it is updated only _after_
  * right "bytes left to zero" value (and that it is updated only _after_
  * a successful copy).  There is also some rather minor exception setup
  * a successful copy).  There is also some rather minor exception setup
  * stuff.
  * stuff.
- *
- * NOTE! This is not directly C-callable, because the calling semantics
- * are different:
- *
- * Inputs:
- *	length in $0
- *	destination address in $6
- *	exception pointer in $7
- *	return address in $28 (exceptions expect it there)
- *
- * Outputs:
- *	bytes left to copy in $0
- *
- * Clobbers:
- *	$1,$2,$3,$4,$5,$6
  */
  */
 #include <asm/export.h>
 #include <asm/export.h>
 
 
@@ -38,62 +23,63 @@
 	.set noreorder
 	.set noreorder
 	.align 4
 	.align 4
 
 
-	.globl __do_clear_user
-	.ent __do_clear_user
-	.frame	$30, 0, $28
+	.globl __clear_user
+	.ent __clear_user
+	.frame	$30, 0, $26
 	.prologue 0
 	.prologue 0
 
 
 $loop:
 $loop:
 	and	$1, 3, $4	# e0    :
 	and	$1, 3, $4	# e0    :
 	beq	$4, 1f		# .. e1 :
 	beq	$4, 1f		# .. e1 :
 
 
-0:	EX( stq_u $31, 0($6) )	# e0    : zero one word
+0:	EX( stq_u $31, 0($16) )	# e0    : zero one word
 	subq	$0, 8, $0	# .. e1 :
 	subq	$0, 8, $0	# .. e1 :
 	subq	$4, 1, $4	# e0    :
 	subq	$4, 1, $4	# e0    :
-	addq	$6, 8, $6	# .. e1 :
+	addq	$16, 8, $16	# .. e1 :
 	bne	$4, 0b		# e1    :
 	bne	$4, 0b		# e1    :
 	unop			#       :
 	unop			#       :
 
 
 1:	bic	$1, 3, $1	# e0    :
 1:	bic	$1, 3, $1	# e0    :
 	beq	$1, $tail	# .. e1 :
 	beq	$1, $tail	# .. e1 :
 
 
-2:	EX( stq_u $31, 0($6) )	# e0    : zero four words
+2:	EX( stq_u $31, 0($16) )	# e0    : zero four words
 	subq	$0, 8, $0	# .. e1 :
 	subq	$0, 8, $0	# .. e1 :
-	EX( stq_u $31, 8($6) )	# e0    :
+	EX( stq_u $31, 8($16) )	# e0    :
 	subq	$0, 8, $0	# .. e1 :
 	subq	$0, 8, $0	# .. e1 :
-	EX( stq_u $31, 16($6) )	# e0    :
+	EX( stq_u $31, 16($16) )	# e0    :
 	subq	$0, 8, $0	# .. e1 :
 	subq	$0, 8, $0	# .. e1 :
-	EX( stq_u $31, 24($6) )	# e0    :
+	EX( stq_u $31, 24($16) )	# e0    :
 	subq	$0, 8, $0	# .. e1 :
 	subq	$0, 8, $0	# .. e1 :
 	subq	$1, 4, $1	# e0    :
 	subq	$1, 4, $1	# e0    :
-	addq	$6, 32, $6	# .. e1 :
+	addq	$16, 32, $16	# .. e1 :
 	bne	$1, 2b		# e1    :
 	bne	$1, 2b		# e1    :
 
 
 $tail:
 $tail:
 	bne	$2, 1f		# e1    : is there a tail to do?
 	bne	$2, 1f		# e1    : is there a tail to do?
-	ret	$31, ($28), 1	# .. e1 :
+	ret	$31, ($26), 1	# .. e1 :
 
 
-1:	EX( ldq_u $5, 0($6) )	# e0    :
+1:	EX( ldq_u $5, 0($16) )	# e0    :
 	clr	$0		# .. e1 :
 	clr	$0		# .. e1 :
 	nop			# e1    :
 	nop			# e1    :
 	mskqh	$5, $0, $5	# e0    :
 	mskqh	$5, $0, $5	# e0    :
-	EX( stq_u $5, 0($6) )	# e0    :
-	ret	$31, ($28), 1	# .. e1 :
+	EX( stq_u $5, 0($16) )	# e0    :
+	ret	$31, ($26), 1	# .. e1 :
 
 
-__do_clear_user:
-	and	$6, 7, $4	# e0    : find dest misalignment
+__clear_user:
+	and	$17, $17, $0
+	and	$16, 7, $4	# e0    : find dest misalignment
 	beq	$0, $zerolength # .. e1 :
 	beq	$0, $zerolength # .. e1 :
 	addq	$0, $4, $1	# e0    : bias counter
 	addq	$0, $4, $1	# e0    : bias counter
 	and	$1, 7, $2	# e1    : number of bytes in tail
 	and	$1, 7, $2	# e1    : number of bytes in tail
 	srl	$1, 3, $1	# e0    :
 	srl	$1, 3, $1	# e0    :
 	beq	$4, $loop	# .. e1 :
 	beq	$4, $loop	# .. e1 :
 
 
-	EX( ldq_u $5, 0($6) )	# e0    : load dst word to mask back in
+	EX( ldq_u $5, 0($16) )	# e0    : load dst word to mask back in
 	beq	$1, $oneword	# .. e1 : sub-word store?
 	beq	$1, $oneword	# .. e1 : sub-word store?
 
 
-	mskql	$5, $6, $5	# e0    : take care of misaligned head
-	addq	$6, 8, $6	# .. e1 :
-	EX( stq_u $5, -8($6) )	# e0    :
+	mskql	$5, $16, $5	# e0    : take care of misaligned head
+	addq	$16, 8, $16	# .. e1 :
+	EX( stq_u $5, -8($16) )	# e0    :
 	addq	$0, $4, $0	# .. e1 : bytes left -= 8 - misalignment
 	addq	$0, $4, $0	# .. e1 : bytes left -= 8 - misalignment
 	subq	$1, 1, $1	# e0    :
 	subq	$1, 1, $1	# e0    :
 	subq	$0, 8, $0	# .. e1 :
 	subq	$0, 8, $0	# .. e1 :
@@ -101,15 +87,15 @@ __do_clear_user:
 	unop			#       :
 	unop			#       :
 
 
 $oneword:
 $oneword:
-	mskql	$5, $6, $4	# e0    :
+	mskql	$5, $16, $4	# e0    :
 	mskqh	$5, $2, $5	# e0    :
 	mskqh	$5, $2, $5	# e0    :
 	or	$5, $4, $5	# e1    :
 	or	$5, $4, $5	# e1    :
-	EX( stq_u $5, 0($6) )	# e0    :
+	EX( stq_u $5, 0($16) )	# e0    :
 	clr	$0		# .. e1 :
 	clr	$0		# .. e1 :
 
 
 $zerolength:
 $zerolength:
 $exception:
 $exception:
-	ret	$31, ($28), 1	# .. e1 :
+	ret	$31, ($26), 1	# .. e1 :
 
 
-	.end __do_clear_user
-	EXPORT_SYMBOL(__do_clear_user)
+	.end __clear_user
+	EXPORT_SYMBOL(__clear_user)

+ 34 - 48
arch/alpha/lib/copy_user.S

@@ -9,21 +9,6 @@
  * contains the right "bytes left to copy" value (and that it is updated
  * contains the right "bytes left to copy" value (and that it is updated
  * only _after_ a successful copy). There is also some rather minor
  * only _after_ a successful copy). There is also some rather minor
  * exception setup stuff..
  * exception setup stuff..
- *
- * NOTE! This is not directly C-callable, because the calling semantics are
- * different:
- *
- * Inputs:
- *	length in $0
- *	destination address in $6
- *	source address in $7
- *	return address in $28
- *
- * Outputs:
- *	bytes left to copy in $0
- *
- * Clobbers:
- *	$1,$2,$3,$4,$5,$6,$7
  */
  */
 
 
 #include <asm/export.h>
 #include <asm/export.h>
@@ -49,58 +34,59 @@
 	.ent __copy_user
 	.ent __copy_user
 __copy_user:
 __copy_user:
 	.prologue 0
 	.prologue 0
-	and $6,7,$3
+	and $18,$18,$0
+	and $16,7,$3
 	beq $0,$35
 	beq $0,$35
 	beq $3,$36
 	beq $3,$36
 	subq $3,8,$3
 	subq $3,8,$3
 	.align 4
 	.align 4
 $37:
 $37:
-	EXI( ldq_u $1,0($7) )
-	EXO( ldq_u $2,0($6) )
-	extbl $1,$7,$1
-	mskbl $2,$6,$2
-	insbl $1,$6,$1
+	EXI( ldq_u $1,0($17) )
+	EXO( ldq_u $2,0($16) )
+	extbl $1,$17,$1
+	mskbl $2,$16,$2
+	insbl $1,$16,$1
 	addq $3,1,$3
 	addq $3,1,$3
 	bis $1,$2,$1
 	bis $1,$2,$1
-	EXO( stq_u $1,0($6) )
+	EXO( stq_u $1,0($16) )
 	subq $0,1,$0
 	subq $0,1,$0
-	addq $6,1,$6
-	addq $7,1,$7
+	addq $16,1,$16
+	addq $17,1,$17
 	beq $0,$41
 	beq $0,$41
 	bne $3,$37
 	bne $3,$37
 $36:
 $36:
-	and $7,7,$1
+	and $17,7,$1
 	bic $0,7,$4
 	bic $0,7,$4
 	beq $1,$43
 	beq $1,$43
 	beq $4,$48
 	beq $4,$48
-	EXI( ldq_u $3,0($7) )
+	EXI( ldq_u $3,0($17) )
 	.align 4
 	.align 4
 $50:
 $50:
-	EXI( ldq_u $2,8($7) )
+	EXI( ldq_u $2,8($17) )
 	subq $4,8,$4
 	subq $4,8,$4
-	extql $3,$7,$3
-	extqh $2,$7,$1
+	extql $3,$17,$3
+	extqh $2,$17,$1
 	bis $3,$1,$1
 	bis $3,$1,$1
-	EXO( stq $1,0($6) )
-	addq $7,8,$7
+	EXO( stq $1,0($16) )
+	addq $17,8,$17
 	subq $0,8,$0
 	subq $0,8,$0
-	addq $6,8,$6
+	addq $16,8,$16
 	bis $2,$2,$3
 	bis $2,$2,$3
 	bne $4,$50
 	bne $4,$50
 $48:
 $48:
 	beq $0,$41
 	beq $0,$41
 	.align 4
 	.align 4
 $57:
 $57:
-	EXI( ldq_u $1,0($7) )
-	EXO( ldq_u $2,0($6) )
-	extbl $1,$7,$1
-	mskbl $2,$6,$2
-	insbl $1,$6,$1
+	EXI( ldq_u $1,0($17) )
+	EXO( ldq_u $2,0($16) )
+	extbl $1,$17,$1
+	mskbl $2,$16,$2
+	insbl $1,$16,$1
 	bis $1,$2,$1
 	bis $1,$2,$1
-	EXO( stq_u $1,0($6) )
+	EXO( stq_u $1,0($16) )
 	subq $0,1,$0
 	subq $0,1,$0
-	addq $6,1,$6
-	addq $7,1,$7
+	addq $16,1,$16
+	addq $17,1,$17
 	bne $0,$57
 	bne $0,$57
 	br $31,$41
 	br $31,$41
 	.align 4
 	.align 4
@@ -108,27 +94,27 @@ $43:
 	beq $4,$65
 	beq $4,$65
 	.align 4
 	.align 4
 $66:
 $66:
-	EXI( ldq $1,0($7) )
+	EXI( ldq $1,0($17) )
 	subq $4,8,$4
 	subq $4,8,$4
-	EXO( stq $1,0($6) )
-	addq $7,8,$7
+	EXO( stq $1,0($16) )
+	addq $17,8,$17
 	subq $0,8,$0
 	subq $0,8,$0
-	addq $6,8,$6
+	addq $16,8,$16
 	bne $4,$66
 	bne $4,$66
 $65:
 $65:
 	beq $0,$41
 	beq $0,$41
-	EXI( ldq $2,0($7) )
-	EXO( ldq $1,0($6) )
+	EXI( ldq $2,0($17) )
+	EXO( ldq $1,0($16) )
 	mskql $2,$0,$2
 	mskql $2,$0,$2
 	mskqh $1,$0,$1
 	mskqh $1,$0,$1
 	bis $2,$1,$2
 	bis $2,$1,$2
-	EXO( stq $2,0($6) )
+	EXO( stq $2,0($16) )
 	bis $31,$31,$0
 	bis $31,$31,$0
 $41:
 $41:
 $35:
 $35:
 $exitin:
 $exitin:
 $exitout:
 $exitout:
-	ret $31,($28),1
+	ret $31,($26),1
 
 
 	.end __copy_user
 	.end __copy_user
 EXPORT_SYMBOL(__copy_user)
 EXPORT_SYMBOL(__copy_user)

+ 2 - 8
arch/alpha/lib/csum_partial_copy.c

@@ -45,10 +45,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
 	__asm__ __volatile__(				\
 	__asm__ __volatile__(				\
 	"1:	ldq_u %0,%2\n"				\
 	"1:	ldq_u %0,%2\n"				\
 	"2:\n"						\
 	"2:\n"						\
-	".section __ex_table,\"a\"\n"			\
-	"	.long 1b - .\n"				\
-	"	lda %0,2b-1b(%1)\n"			\
-	".previous"					\
+	EXC(1b,2b,%0,%1)				\
 		: "=r"(x), "=r"(__guu_err)		\
 		: "=r"(x), "=r"(__guu_err)		\
 		: "m"(__m(ptr)), "1"(0));		\
 		: "m"(__m(ptr)), "1"(0));		\
 	__guu_err;					\
 	__guu_err;					\
@@ -60,10 +57,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
 	__asm__ __volatile__(				\
 	__asm__ __volatile__(				\
 	"1:	stq_u %2,%1\n"				\
 	"1:	stq_u %2,%1\n"				\
 	"2:\n"						\
 	"2:\n"						\
-	".section __ex_table,\"a\"\n"			\
-	"	.long 1b - ."				\
-	"	lda $31,2b-1b(%0)\n"			\
-	".previous"					\
+	EXC(1b,2b,$31,%0)				\
 		: "=r"(__puu_err)			\
 		: "=r"(__puu_err)			\
 		: "m"(__m(addr)), "rJ"(x), "0"(0));	\
 		: "m"(__m(addr)), "rJ"(x), "0"(0));	\
 	__puu_err;					\
 	__puu_err;					\

+ 35 - 49
arch/alpha/lib/ev6-clear_user.S

@@ -9,21 +9,6 @@
  * a successful copy).  There is also some rather minor exception setup
  * a successful copy).  There is also some rather minor exception setup
  * stuff.
  * stuff.
  *
  *
- * NOTE! This is not directly C-callable, because the calling semantics
- * are different:
- *
- * Inputs:
- *	length in $0
- *	destination address in $6
- *	exception pointer in $7
- *	return address in $28 (exceptions expect it there)
- *
- * Outputs:
- *	bytes left to copy in $0
- *
- * Clobbers:
- *	$1,$2,$3,$4,$5,$6
- *
  * Much of the information about 21264 scheduling/coding comes from:
  * Much of the information about 21264 scheduling/coding comes from:
  *	Compiler Writer's Guide for the Alpha 21264
  *	Compiler Writer's Guide for the Alpha 21264
  *	abbreviated as 'CWG' in other comments here
  *	abbreviated as 'CWG' in other comments here
@@ -56,14 +41,15 @@
 	.set noreorder
 	.set noreorder
 	.align 4
 	.align 4
 
 
-	.globl __do_clear_user
-	.ent __do_clear_user
-	.frame	$30, 0, $28
+	.globl __clear_user
+	.ent __clear_user
+	.frame	$30, 0, $26
 	.prologue 0
 	.prologue 0
 
 
 				# Pipeline info : Slotting & Comments
 				# Pipeline info : Slotting & Comments
-__do_clear_user:
-	and	$6, 7, $4	# .. E  .. ..	: find dest head misalignment
+__clear_user:
+	and	$17, $17, $0
+	and	$16, 7, $4	# .. E  .. ..	: find dest head misalignment
 	beq	$0, $zerolength # U  .. .. ..	:  U L U L
 	beq	$0, $zerolength # U  .. .. ..	:  U L U L
 
 
 	addq	$0, $4, $1	# .. .. .. E	: bias counter
 	addq	$0, $4, $1	# .. .. .. E	: bias counter
@@ -75,14 +61,14 @@ __do_clear_user:
 
 
 /*
 /*
  * Head is not aligned.  Write (8 - $4) bytes to head of destination
  * Head is not aligned.  Write (8 - $4) bytes to head of destination
- * This means $6 is known to be misaligned
+ * This means $16 is known to be misaligned
  */
  */
-	EX( ldq_u $5, 0($6) )	# .. .. .. L	: load dst word to mask back in
+	EX( ldq_u $5, 0($16) )	# .. .. .. L	: load dst word to mask back in
 	beq	$1, $onebyte	# .. .. U  ..	: sub-word store?
 	beq	$1, $onebyte	# .. .. U  ..	: sub-word store?
-	mskql	$5, $6, $5	# .. U  .. ..	: take care of misaligned head
-	addq	$6, 8, $6	# E  .. .. .. 	: L U U L
+	mskql	$5, $16, $5	# .. U  .. ..	: take care of misaligned head
+	addq	$16, 8, $16	# E  .. .. .. 	: L U U L
 
 
-	EX( stq_u $5, -8($6) )	# .. .. .. L	:
+	EX( stq_u $5, -8($16) )	# .. .. .. L	:
 	subq	$1, 1, $1	# .. .. E  ..	:
 	subq	$1, 1, $1	# .. .. E  ..	:
 	addq	$0, $4, $0	# .. E  .. ..	: bytes left -= 8 - misalignment
 	addq	$0, $4, $0	# .. E  .. ..	: bytes left -= 8 - misalignment
 	subq	$0, 8, $0	# E  .. .. ..	: U L U L
 	subq	$0, 8, $0	# E  .. .. ..	: U L U L
@@ -93,11 +79,11 @@ __do_clear_user:
  * values upon initial entry to the loop
  * values upon initial entry to the loop
  * $1 is number of quadwords to clear (zero is a valid value)
  * $1 is number of quadwords to clear (zero is a valid value)
  * $2 is number of trailing bytes (0..7) ($2 never used...)
  * $2 is number of trailing bytes (0..7) ($2 never used...)
- * $6 is known to be aligned 0mod8
+ * $16 is known to be aligned 0mod8
  */
  */
 $headalign:
 $headalign:
 	subq	$1, 16, $4	# .. .. .. E	: If < 16, we can not use the huge loop
 	subq	$1, 16, $4	# .. .. .. E	: If < 16, we can not use the huge loop
-	and	$6, 0x3f, $2	# .. .. E  ..	: Forward work for huge loop
+	and	$16, 0x3f, $2	# .. .. E  ..	: Forward work for huge loop
 	subq	$2, 0x40, $3	# .. E  .. ..	: bias counter (huge loop)
 	subq	$2, 0x40, $3	# .. E  .. ..	: bias counter (huge loop)
 	blt	$4, $trailquad	# U  .. .. ..	: U L U L
 	blt	$4, $trailquad	# U  .. .. ..	: U L U L
 
 
@@ -114,21 +100,21 @@ $headalign:
 	beq	$3, $bigalign	# U  .. .. ..	: U L U L : Aligned 0mod64
 	beq	$3, $bigalign	# U  .. .. ..	: U L U L : Aligned 0mod64
 
 
 $alignmod64:
 $alignmod64:
-	EX( stq_u $31, 0($6) )	# .. .. .. L
+	EX( stq_u $31, 0($16) )	# .. .. .. L
 	addq	$3, 8, $3	# .. .. E  ..
 	addq	$3, 8, $3	# .. .. E  ..
 	subq	$0, 8, $0	# .. E  .. ..
 	subq	$0, 8, $0	# .. E  .. ..
 	nop			# E  .. .. ..	: U L U L
 	nop			# E  .. .. ..	: U L U L
 
 
 	nop			# .. .. .. E
 	nop			# .. .. .. E
 	subq	$1, 1, $1	# .. .. E  ..
 	subq	$1, 1, $1	# .. .. E  ..
-	addq	$6, 8, $6	# .. E  .. ..
+	addq	$16, 8, $16	# .. E  .. ..
 	blt	$3, $alignmod64	# U  .. .. ..	: U L U L
 	blt	$3, $alignmod64	# U  .. .. ..	: U L U L
 
 
 $bigalign:
 $bigalign:
 /*
 /*
  * $0 is the number of bytes left
  * $0 is the number of bytes left
  * $1 is the number of quads left
  * $1 is the number of quads left
- * $6 is aligned 0mod64
+ * $16 is aligned 0mod64
  * we know that we'll be taking a minimum of one trip through
  * we know that we'll be taking a minimum of one trip through
  * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
  * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
  * We are _not_ going to update $0 after every single store.  That
  * We are _not_ going to update $0 after every single store.  That
@@ -145,39 +131,39 @@ $bigalign:
 	nop			# E :
 	nop			# E :
 	nop			# E :
 	nop			# E :
 	nop			# E :
 	nop			# E :
-	bis	$6,$6,$3	# E : U L U L : Initial wh64 address is dest
+	bis	$16,$16,$3	# E : U L U L : Initial wh64 address is dest
 	/* This might actually help for the current trip... */
 	/* This might actually help for the current trip... */
 
 
 $do_wh64:
 $do_wh64:
 	wh64	($3)		# .. .. .. L1	: memory subsystem hint
 	wh64	($3)		# .. .. .. L1	: memory subsystem hint
 	subq	$1, 16, $4	# .. .. E  ..	: Forward calculation - repeat the loop?
 	subq	$1, 16, $4	# .. .. E  ..	: Forward calculation - repeat the loop?
-	EX( stq_u $31, 0($6) )	# .. L  .. ..
+	EX( stq_u $31, 0($16) )	# .. L  .. ..
 	subq	$0, 8, $0	# E  .. .. ..	: U L U L
 	subq	$0, 8, $0	# E  .. .. ..	: U L U L
 
 
-	addq	$6, 128, $3	# E : Target address of wh64
-	EX( stq_u $31, 8($6) )	# L :
-	EX( stq_u $31, 16($6) )	# L :
+	addq	$16, 128, $3	# E : Target address of wh64
+	EX( stq_u $31, 8($16) )	# L :
+	EX( stq_u $31, 16($16) )	# L :
 	subq	$0, 16, $0	# E : U L L U
 	subq	$0, 16, $0	# E : U L L U
 
 
 	nop			# E :
 	nop			# E :
-	EX( stq_u $31, 24($6) )	# L :
-	EX( stq_u $31, 32($6) )	# L :
+	EX( stq_u $31, 24($16) )	# L :
+	EX( stq_u $31, 32($16) )	# L :
 	subq	$0, 168, $5	# E : U L L U : two trips through the loop left?
 	subq	$0, 168, $5	# E : U L L U : two trips through the loop left?
 	/* 168 = 192 - 24, since we've already completed some stores */
 	/* 168 = 192 - 24, since we've already completed some stores */
 
 
 	subq	$0, 16, $0	# E :
 	subq	$0, 16, $0	# E :
-	EX( stq_u $31, 40($6) )	# L :
-	EX( stq_u $31, 48($6) )	# L :
-	cmovlt	$5, $6, $3	# E : U L L U : Latency 2, extra mapping cycle
+	EX( stq_u $31, 40($16) )	# L :
+	EX( stq_u $31, 48($16) )	# L :
+	cmovlt	$5, $16, $3	# E : U L L U : Latency 2, extra mapping cycle
 
 
 	subq	$1, 8, $1	# E :
 	subq	$1, 8, $1	# E :
 	subq	$0, 16, $0	# E :
 	subq	$0, 16, $0	# E :
-	EX( stq_u $31, 56($6) )	# L :
+	EX( stq_u $31, 56($16) )	# L :
 	nop			# E : U L U L
 	nop			# E : U L U L
 
 
 	nop			# E :
 	nop			# E :
 	subq	$0, 8, $0	# E :
 	subq	$0, 8, $0	# E :
-	addq	$6, 64, $6	# E :
+	addq	$16, 64, $16	# E :
 	bge	$4, $do_wh64	# U : U L U L
 	bge	$4, $do_wh64	# U : U L U L
 
 
 $trailquad:
 $trailquad:
@@ -190,14 +176,14 @@ $trailquad:
 	beq	$1, $trailbytes	# U  .. .. ..	: U L U L : Only 0..7 bytes to go
 	beq	$1, $trailbytes	# U  .. .. ..	: U L U L : Only 0..7 bytes to go
 
 
 $onequad:
 $onequad:
-	EX( stq_u $31, 0($6) )	# .. .. .. L
+	EX( stq_u $31, 0($16) )	# .. .. .. L
 	subq	$1, 1, $1	# .. .. E  ..
 	subq	$1, 1, $1	# .. .. E  ..
 	subq	$0, 8, $0	# .. E  .. ..
 	subq	$0, 8, $0	# .. E  .. ..
 	nop			# E  .. .. ..	: U L U L
 	nop			# E  .. .. ..	: U L U L
 
 
 	nop			# .. .. .. E
 	nop			# .. .. .. E
 	nop			# .. .. E  ..
 	nop			# .. .. E  ..
-	addq	$6, 8, $6	# .. E  .. ..
+	addq	$16, 8, $16	# .. E  .. ..
 	bgt	$1, $onequad	# U  .. .. ..	: U L U L
 	bgt	$1, $onequad	# U  .. .. ..	: U L U L
 
 
 	# We have an unknown number of bytes left to go.
 	# We have an unknown number of bytes left to go.
@@ -211,9 +197,9 @@ $trailbytes:
 	# so we will use $0 as the loop counter
 	# so we will use $0 as the loop counter
 	# We know for a fact that $0 > 0 zero due to previous context
 	# We know for a fact that $0 > 0 zero due to previous context
 $onebyte:
 $onebyte:
-	EX( stb $31, 0($6) )	# .. .. .. L
+	EX( stb $31, 0($16) )	# .. .. .. L
 	subq	$0, 1, $0	# .. .. E  ..	:
 	subq	$0, 1, $0	# .. .. E  ..	:
-	addq	$6, 1, $6	# .. E  .. ..	:
+	addq	$16, 1, $16	# .. E  .. ..	:
 	bgt	$0, $onebyte	# U  .. .. ..	: U L U L
 	bgt	$0, $onebyte	# U  .. .. ..	: U L U L
 
 
 $zerolength:
 $zerolength:
@@ -221,6 +207,6 @@ $exception:			# Destination for exception recovery(?)
 	nop			# .. .. .. E	:
 	nop			# .. .. .. E	:
 	nop			# .. .. E  ..	:
 	nop			# .. .. E  ..	:
 	nop			# .. E  .. ..	:
 	nop			# .. E  .. ..	:
-	ret	$31, ($28), 1	# L0 .. .. ..	: L U L U
-	.end __do_clear_user
-	EXPORT_SYMBOL(__do_clear_user)
+	ret	$31, ($26), 1	# L0 .. .. ..	: L U L U
+	.end __clear_user
+	EXPORT_SYMBOL(__clear_user)

+ 45 - 59
arch/alpha/lib/ev6-copy_user.S

@@ -12,21 +12,6 @@
  * only _after_ a successful copy). There is also some rather minor
  * only _after_ a successful copy). There is also some rather minor
  * exception setup stuff..
  * exception setup stuff..
  *
  *
- * NOTE! This is not directly C-callable, because the calling semantics are
- * different:
- *
- * Inputs:
- *	length in $0
- *	destination address in $6
- *	source address in $7
- *	return address in $28
- *
- * Outputs:
- *	bytes left to copy in $0
- *
- * Clobbers:
- *	$1,$2,$3,$4,$5,$6,$7
- *
  * Much of the information about 21264 scheduling/coding comes from:
  * Much of the information about 21264 scheduling/coding comes from:
  *	Compiler Writer's Guide for the Alpha 21264
  *	Compiler Writer's Guide for the Alpha 21264
  *	abbreviated as 'CWG' in other comments here
  *	abbreviated as 'CWG' in other comments here
@@ -60,10 +45,11 @@
 				# Pipeline info: Slotting & Comments
 				# Pipeline info: Slotting & Comments
 __copy_user:
 __copy_user:
 	.prologue 0
 	.prologue 0
-	subq $0, 32, $1		# .. E  .. ..	: Is this going to be a small copy?
+	andq $18, $18, $0
+	subq $18, 32, $1	# .. E  .. ..	: Is this going to be a small copy?
 	beq $0, $zerolength	# U  .. .. ..	: U L U L
 	beq $0, $zerolength	# U  .. .. ..	: U L U L
 
 
-	and $6,7,$3		# .. .. .. E	: is leading dest misalignment
+	and $16,7,$3		# .. .. .. E	: is leading dest misalignment
 	ble $1, $onebyteloop	# .. .. U  ..	: 1st branch : small amount of data
 	ble $1, $onebyteloop	# .. .. U  ..	: 1st branch : small amount of data
 	beq $3, $destaligned	# .. U  .. ..	: 2nd (one cycle fetcher stall)
 	beq $3, $destaligned	# .. U  .. ..	: 2nd (one cycle fetcher stall)
 	subq $3, 8, $3		# E  .. .. ..	: L U U L : trip counter
 	subq $3, 8, $3		# E  .. .. ..	: L U U L : trip counter
@@ -73,17 +59,17 @@ __copy_user:
  * We know we have at least one trip through this loop
  * We know we have at least one trip through this loop
  */
  */
 $aligndest:
 $aligndest:
-	EXI( ldbu $1,0($7) )	# .. .. .. L	: Keep loads separate from stores
-	addq $6,1,$6		# .. .. E  ..	: Section 3.8 in the CWG
+	EXI( ldbu $1,0($17) )	# .. .. .. L	: Keep loads separate from stores
+	addq $16,1,$16		# .. .. E  ..	: Section 3.8 in the CWG
 	addq $3,1,$3		# .. E  .. ..	:
 	addq $3,1,$3		# .. E  .. ..	:
 	nop			# E  .. .. ..	: U L U L
 	nop			# E  .. .. ..	: U L U L
 
 
 /*
 /*
- * the -1 is to compensate for the inc($6) done in a previous quadpack
+ * the -1 is to compensate for the inc($16) done in a previous quadpack
  * which allows us zero dependencies within either quadpack in the loop
  * which allows us zero dependencies within either quadpack in the loop
  */
  */
-	EXO( stb $1,-1($6) )	# .. .. .. L	:
-	addq $7,1,$7		# .. .. E  ..	: Section 3.8 in the CWG
+	EXO( stb $1,-1($16) )	# .. .. .. L	:
+	addq $17,1,$17		# .. .. E  ..	: Section 3.8 in the CWG
 	subq $0,1,$0		# .. E  .. ..	:
 	subq $0,1,$0		# .. E  .. ..	:
 	bne $3, $aligndest	# U  .. .. ..	: U L U L
 	bne $3, $aligndest	# U  .. .. ..	: U L U L
 
 
@@ -92,29 +78,29 @@ $aligndest:
  * If we arrived via branch, we have a minimum of 32 bytes
  * If we arrived via branch, we have a minimum of 32 bytes
  */
  */
 $destaligned:
 $destaligned:
-	and $7,7,$1		# .. .. .. E	: Check _current_ source alignment
+	and $17,7,$1		# .. .. .. E	: Check _current_ source alignment
 	bic $0,7,$4		# .. .. E  ..	: number bytes as a quadword loop
 	bic $0,7,$4		# .. .. E  ..	: number bytes as a quadword loop
-	EXI( ldq_u $3,0($7) )	# .. L  .. ..	: Forward fetch for fallthrough code
+	EXI( ldq_u $3,0($17) )	# .. L  .. ..	: Forward fetch for fallthrough code
 	beq $1,$quadaligned	# U  .. .. ..	: U L U L
 	beq $1,$quadaligned	# U  .. .. ..	: U L U L
 
 
 /*
 /*
- * In the worst case, we've just executed an ldq_u here from 0($7)
+ * In the worst case, we've just executed an ldq_u here from 0($17)
  * and we'll repeat it once if we take the branch
  * and we'll repeat it once if we take the branch
  */
  */
 
 
 /* Misaligned quadword loop - not unrolled.  Leave it that way. */
 /* Misaligned quadword loop - not unrolled.  Leave it that way. */
 $misquad:
 $misquad:
-	EXI( ldq_u $2,8($7) )	# .. .. .. L	:
+	EXI( ldq_u $2,8($17) )	# .. .. .. L	:
 	subq $4,8,$4		# .. .. E  ..	:
 	subq $4,8,$4		# .. .. E  ..	:
-	extql $3,$7,$3		# .. U  .. ..	:
-	extqh $2,$7,$1		# U  .. .. ..	: U U L L
+	extql $3,$17,$3		# .. U  .. ..	:
+	extqh $2,$17,$1		# U  .. .. ..	: U U L L
 
 
 	bis $3,$1,$1		# .. .. .. E	:
 	bis $3,$1,$1		# .. .. .. E	:
-	EXO( stq $1,0($6) )	# .. .. L  ..	:
-	addq $7,8,$7		# .. E  .. ..	:
+	EXO( stq $1,0($16) )	# .. .. L  ..	:
+	addq $17,8,$17		# .. E  .. ..	:
 	subq $0,8,$0		# E  .. .. ..	: U L L U
 	subq $0,8,$0		# E  .. .. ..	: U L L U
 
 
-	addq $6,8,$6		# .. .. .. E	:
+	addq $16,8,$16		# .. .. .. E	:
 	bis $2,$2,$3		# .. .. E  ..	:
 	bis $2,$2,$3		# .. .. E  ..	:
 	nop			# .. E  .. ..	:
 	nop			# .. E  .. ..	:
 	bne $4,$misquad		# U  .. .. ..	: U L U L
 	bne $4,$misquad		# U  .. .. ..	: U L U L
@@ -125,8 +111,8 @@ $misquad:
 	beq $0,$zerolength	# U  .. .. ..	: U L U L
 	beq $0,$zerolength	# U  .. .. ..	: U L U L
 
 
 /* We know we have at least one trip through the byte loop */
 /* We know we have at least one trip through the byte loop */
-	EXI ( ldbu $2,0($7) )	# .. .. .. L	: No loads in the same quad
-	addq $6,1,$6		# .. .. E  ..	: as the store (Section 3.8 in CWG)
+	EXI ( ldbu $2,0($17) )	# .. .. .. L	: No loads in the same quad
+	addq $16,1,$16		# .. .. E  ..	: as the store (Section 3.8 in CWG)
 	nop			# .. E  .. ..	:
 	nop			# .. E  .. ..	:
 	br $31, $dirtyentry	# L0 .. .. ..	: L U U L
 	br $31, $dirtyentry	# L0 .. .. ..	: L U U L
 /* Do the trailing byte loop load, then hop into the store part of the loop */
 /* Do the trailing byte loop load, then hop into the store part of the loop */
@@ -136,8 +122,8 @@ $misquad:
  * Based upon the usage context, it's worth the effort to unroll this loop
  * Based upon the usage context, it's worth the effort to unroll this loop
  * $0 - number of bytes to be moved
  * $0 - number of bytes to be moved
  * $4 - number of bytes to move as quadwords
  * $4 - number of bytes to move as quadwords
- * $6 is current destination address
- * $7 is current source address
+ * $16 is current destination address
+ * $17 is current source address
  */
  */
 $quadaligned:
 $quadaligned:
 	subq	$4, 32, $2	# .. .. .. E	: do not unroll for small stuff
 	subq	$4, 32, $2	# .. .. .. E	: do not unroll for small stuff
@@ -155,29 +141,29 @@ $quadaligned:
  * instruction memory hint instruction).
  * instruction memory hint instruction).
  */
  */
 $unroll4:
 $unroll4:
-	EXI( ldq $1,0($7) )	# .. .. .. L
-	EXI( ldq $2,8($7) )	# .. .. L  ..
+	EXI( ldq $1,0($17) )	# .. .. .. L
+	EXI( ldq $2,8($17) )	# .. .. L  ..
 	subq	$4,32,$4	# .. E  .. ..
 	subq	$4,32,$4	# .. E  .. ..
 	nop			# E  .. .. ..	: U U L L
 	nop			# E  .. .. ..	: U U L L
 
 
-	addq	$7,16,$7	# .. .. .. E
-	EXO( stq $1,0($6) )	# .. .. L  ..
-	EXO( stq $2,8($6) )	# .. L  .. ..
+	addq	$17,16,$17	# .. .. .. E
+	EXO( stq $1,0($16) )	# .. .. L  ..
+	EXO( stq $2,8($16) )	# .. L  .. ..
 	subq	$0,16,$0	# E  .. .. ..	: U L L U
 	subq	$0,16,$0	# E  .. .. ..	: U L L U
 
 
-	addq	$6,16,$6	# .. .. .. E
-	EXI( ldq $1,0($7) )	# .. .. L  ..
-	EXI( ldq $2,8($7) )	# .. L  .. ..
+	addq	$16,16,$16	# .. .. .. E
+	EXI( ldq $1,0($17) )	# .. .. L  ..
+	EXI( ldq $2,8($17) )	# .. L  .. ..
 	subq	$4, 32, $3	# E  .. .. ..	: U U L L : is there enough for another trip?
 	subq	$4, 32, $3	# E  .. .. ..	: U U L L : is there enough for another trip?
 
 
-	EXO( stq $1,0($6) )	# .. .. .. L
-	EXO( stq $2,8($6) )	# .. .. L  ..
+	EXO( stq $1,0($16) )	# .. .. .. L
+	EXO( stq $2,8($16) )	# .. .. L  ..
 	subq	$0,16,$0	# .. E  .. ..
 	subq	$0,16,$0	# .. E  .. ..
-	addq	$7,16,$7	# E  .. .. ..	: U L L U
+	addq	$17,16,$17	# E  .. .. ..	: U L L U
 
 
 	nop			# .. .. .. E
 	nop			# .. .. .. E
 	nop			# .. .. E  ..
 	nop			# .. .. E  ..
-	addq	$6,16,$6	# .. E  .. ..
+	addq	$16,16,$16	# .. E  .. ..
 	bgt	$3,$unroll4	# U  .. .. ..	: U L U L
 	bgt	$3,$unroll4	# U  .. .. ..	: U L U L
 
 
 	nop
 	nop
@@ -186,14 +172,14 @@ $unroll4:
 	beq	$4, $noquads
 	beq	$4, $noquads
 
 
 $onequad:
 $onequad:
-	EXI( ldq $1,0($7) )
+	EXI( ldq $1,0($17) )
 	subq	$4,8,$4
 	subq	$4,8,$4
-	addq	$7,8,$7
+	addq	$17,8,$17
 	nop
 	nop
 
 
-	EXO( stq $1,0($6) )
+	EXO( stq $1,0($16) )
 	subq	$0,8,$0
 	subq	$0,8,$0
-	addq	$6,8,$6
+	addq	$16,8,$16
 	bne	$4,$onequad
 	bne	$4,$onequad
 
 
 $noquads:
 $noquads:
@@ -207,23 +193,23 @@ $noquads:
  * There's no point in doing a lot of complex alignment calculations to try to
  * There's no point in doing a lot of complex alignment calculations to try to
  * to quadword stuff for a small amount of data.
  * to quadword stuff for a small amount of data.
  *	$0 - remaining number of bytes left to copy
  *	$0 - remaining number of bytes left to copy
- *	$6 - current dest addr
- *	$7 - current source addr
+ *	$16 - current dest addr
+ *	$17 - current source addr
  */
  */
 
 
 $onebyteloop:
 $onebyteloop:
-	EXI ( ldbu $2,0($7) )	# .. .. .. L	: No loads in the same quad
-	addq $6,1,$6		# .. .. E  ..	: as the store (Section 3.8 in CWG)
+	EXI ( ldbu $2,0($17) )	# .. .. .. L	: No loads in the same quad
+	addq $16,1,$16		# .. .. E  ..	: as the store (Section 3.8 in CWG)
 	nop			# .. E  .. ..	:
 	nop			# .. E  .. ..	:
 	nop			# E  .. .. ..	: U L U L
 	nop			# E  .. .. ..	: U L U L
 
 
 $dirtyentry:
 $dirtyentry:
 /*
 /*
- * the -1 is to compensate for the inc($6) done in a previous quadpack
+ * the -1 is to compensate for the inc($16) done in a previous quadpack
  * which allows us zero dependencies within either quadpack in the loop
  * which allows us zero dependencies within either quadpack in the loop
  */
  */
-	EXO ( stb $2,-1($6) )	# .. .. .. L	:
-	addq $7,1,$7		# .. .. E  ..	: quadpack as the load
+	EXO ( stb $2,-1($16) )	# .. .. .. L	:
+	addq $17,1,$17		# .. .. E  ..	: quadpack as the load
 	subq $0,1,$0		# .. E  .. ..	: change count _after_ copy
 	subq $0,1,$0		# .. E  .. ..	: change count _after_ copy
 	bgt $0,$onebyteloop	# U  .. .. ..	: U L U L
 	bgt $0,$onebyteloop	# U  .. .. ..	: U L U L
 
 
@@ -233,7 +219,7 @@ $exitout:			# Destination for exception recovery(?)
 	nop			# .. .. .. E
 	nop			# .. .. .. E
 	nop			# .. .. E  ..
 	nop			# .. .. E  ..
 	nop			# .. E  .. ..
 	nop			# .. E  .. ..
-	ret $31,($28),1		# L0 .. .. ..	: L U L U
+	ret $31,($26),1		# L0 .. .. ..	: L U L U
 
 
 	.end __copy_user
 	.end __copy_user
 	EXPORT_SYMBOL(__copy_user)
 	EXPORT_SYMBOL(__copy_user)

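Same story for the copy routine: __copy_user now takes dst/src/len in $16/$17/$18 (note the new "andq $18, $18, $0" seeding the residual count) and returns through $26, so it is a normal C function implementing the raw_copy_{to,from}_user() contract: no access_ok(), no zeroing of the destination tail, return value is the number of bytes left uncopied. The tail zeroing that used to be open-coded per architecture is now done once in the generic wrapper, roughly like this (a sketch of the shape of copy_from_user() on top of a raw helper, not the literal kernel text):

	static inline unsigned long
	copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);	/* bytes NOT copied */
		if (unlikely(res))
			memset(to + (n - res), 0, res);		/* zero-pad the tail once, here */
		return res;
	}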
+ 1 - 0
arch/arc/include/asm/Kbuild

@@ -6,6 +6,7 @@ generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += ftrace.h

+ 7 - 18
arch/arc/include/asm/uaccess.h

@@ -24,12 +24,10 @@
 #ifndef _ASM_ARC_UACCESS_H
 #define _ASM_ARC_UACCESS_H

-#include <linux/sched.h>
-#include <asm/errno.h>
 #include <linux/string.h>	/* for generic string functions */


-#define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok		(uaccess_kernel())

 /*
  * Algorithmically, for __user_ok() we want do:
@@ -170,7 +168,7 @@


 static inline unsigned long
-__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	long res = 0;
 	char val;
@@ -396,11 +394,8 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
 	return res;
 }

-extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
-					   unsigned long n);
-
 static inline unsigned long
-__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	long res = 0;
 	char val;
@@ -726,24 +721,20 @@ static inline long __arc_strnlen_user(const char __user *s, long n)
 }

 #ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
-#define __copy_from_user(t, f, n)	__arc_copy_from_user(t, f, n)
-#define __copy_to_user(t, f, n)		__arc_copy_to_user(t, f, n)
+
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
+
 #define __clear_user(d, n)		__arc_clear_user(d, n)
 #define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
 #define __strnlen_user(s, n)		__arc_strnlen_user(s, n)
 #else
-extern long arc_copy_from_user_noinline(void *to, const void __user * from,
-		unsigned long n);
-extern long arc_copy_to_user_noinline(void __user *to, const void *from,
-		unsigned long n);
 extern unsigned long arc_clear_user_noinline(void __user *to,
 		unsigned long n);
 extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
 		long count);
 extern long arc_strnlen_user_noinline(const char __user *src, long n);

-#define __copy_from_user(t, f, n)	arc_copy_from_user_noinline(t, f, n)
-#define __copy_to_user(t, f, n)		arc_copy_to_user_noinline(t, f, n)
 #define __clear_user(d, n)		arc_clear_user_noinline(d, n)
 #define __strncpy_from_user(d, s, n)	arc_strncpy_from_user_noinline(d, s, n)
 #define __strnlen_user(s, n)		arc_strnlen_user_noinline(s, n)
@@ -752,6 +743,4 @@ extern long arc_strnlen_user_noinline(const char __user *src, long n);

 #include <asm-generic/uaccess.h>

-extern int fixup_exception(struct pt_regs *regs);
-
 #endif

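With raw_copy_{to,from}_user() defined, the remaining per-arch knob is whether the generic copy_to_user()/copy_from_user() wrappers are emitted inline or out of line. Defining INLINE_COPY_TO_USER / INLINE_COPY_FROM_USER (as arc now does for the non-CC_OPTIMIZE_FOR_SIZE build) picks the inline form; leaving them undefined gets a single shared out-of-line copy from lib/usercopy.c. A rough sketch of that selection, assuming the wrapper body matches the copy_from_user() sketch shown earlier (not the literal kernel text):

	#ifdef INLINE_COPY_FROM_USER
	static inline unsigned long
	_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))
			memset(to + (n - res), 0, res);
		return res;
	}
	#else
	/* one shared out-of-line definition, built in lib/usercopy.c */
	extern unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n);
	#endif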
+ 0 - 14
arch/arc/mm/extable.c

@@ -28,20 +28,6 @@ int fixup_exception(struct pt_regs *regs)
 
 
 #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 
 
-long arc_copy_from_user_noinline(void *to, const void __user *from,
-		unsigned long n)
-{
-	return __arc_copy_from_user(to, from, n);
-}
-EXPORT_SYMBOL(arc_copy_from_user_noinline);
-
-long arc_copy_to_user_noinline(void __user *to, const void *from,
-		unsigned long n)
-{
-	return __arc_copy_to_user(to, from, n);
-}
-EXPORT_SYMBOL(arc_copy_to_user_noinline);
-
 unsigned long arc_clear_user_noinline(void __user *to,
 unsigned long arc_clear_user_noinline(void __user *to,
 		unsigned long n)
 		unsigned long n)
 {
 {

+ 0 - 1
arch/arm/Kconfig

@@ -41,7 +41,6 @@ config ARM
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU

+ 1 - 0
arch/arm/include/asm/Kbuild

@@ -7,6 +7,7 @@ generic-y += early_ioremap.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h

+ 16 - 71
arch/arm/include/asm/uaccess.h

@@ -12,8 +12,6 @@
  * User space memory access functions
  */
 #include <linux/string.h>
-#include <linux/thread_info.h>
-#include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/domain.h>
 #include <asm/unified.h>
@@ -26,28 +24,7 @@
 #define __put_user_unaligned __put_user
 #endif

-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>

 /*
  * These two functions allow hooking accesses to userspace to increase
@@ -271,7 +248,7 @@ static inline void set_fs(mm_segment_t fs)
 #define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

 #define user_addr_max() \
-	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
+	(uaccess_kernel() ? ~0UL : get_fs())

 /*
  * The "__xxx" versions of the user access functions do not verify the
@@ -478,7 +455,7 @@ extern unsigned long __must_check
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);

 static inline unsigned long __must_check
-__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned int __ua_flags;

@@ -494,7 +471,7 @@ extern unsigned long __must_check
 __copy_to_user_std(void __user *to, const void *from, unsigned long n);

 static inline unsigned long __must_check
-__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags;
@@ -522,54 +499,22 @@ __clear_user(void __user *addr, unsigned long n)
 }
 }
 
 
 #else
 #else
-#define __arch_copy_from_user(to, from, n)	\
-					(memcpy(to, (void __force *)from, n), 0)
-#define __arch_copy_to_user(to, from, n)	\
-					(memcpy((void __force *)to, from, n), 0)
-#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
-#endif
-
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	check_object_size(to, n, false);
-	return __arch_copy_from_user(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long res = n;
-
-	check_object_size(to, n, false);
-
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = __arch_copy_from_user(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 {
-	check_object_size(from, n, true);
-
-	return __arch_copy_to_user(to, from, n);
+	memcpy(to, (const void __force *)from, n);
+	return 0;
 }
 }
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 {
-	check_object_size(from, n, true);
-
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __arch_copy_to_user(to, from, n);
-	return n;
+	memcpy((void __force *)to, from, n);
+	return 0;
 }
 }
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
+#endif
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
 
 
 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {
 {

+ 2 - 2
arch/arm/lib/uaccess_with_memcpy.c

@@ -90,7 +90,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	unsigned long ua_flags;
 	int atomic;

-	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+	if (uaccess_kernel()) {
 		memcpy((void *)to, from, n);
 		return 0;
 	}
@@ -162,7 +162,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 {
 	unsigned long ua_flags;

-	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+	if (uaccess_kernel()) {
 		memset((void *)addr, 0, n);
 		return 0;
 	}

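uaccess_kernel() is the new spelling used throughout these conversions for the old segment_eq(get_fs(), KERNEL_DS) test, i.e. "is the current address limit set to kernel space (set_fs(KERNEL_DS) in effect)?". In the generic uaccess header it is essentially just:

	/* Sketch: uaccess_kernel() names the old get_fs() check. */
	#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

so hunks like the two above are a rename plus a dropped unlikely() hint, not a behaviour change.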
+ 0 - 1
arch/arm64/Kconfig

@@ -60,7 +60,6 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
-	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)

+ 25 - 0
arch/arm64/include/asm/extable.h

@@ -0,0 +1,25 @@
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	int insn, fixup;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+extern int fixup_exception(struct pt_regs *regs);
+#endif

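Since the new header sets ARCH_HAS_RELATIVE_EXTABLE, the two int fields are self-relative offsets rather than absolute addresses; whoever walks the table adds the address of the field itself to recover the faulting instruction or the fixup target. A sketch in the spirit of the helpers in lib/extable.c and the arm64 fixup code (names here are illustrative):

	static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->insn + x->insn;	/* offset is relative to the field */
	}

	static inline unsigned long ex_to_fixup(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->fixup + x->fixup;
	}

Relative entries keep the table half the size on 64-bit and make it position independent.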
+ 6 - 77
arch/arm64/include/asm/uaccess.h

@@ -28,38 +28,12 @@
 #include <linux/bitops.h>
 #include <linux/kasan-checks.h>
 #include <linux/string.h>
-#include <linux/thread_info.h>

 #include <asm/cpufeature.h>
 #include <asm/ptrace.h>
-#include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The exception table consists of pairs of relative offsets: the first
- * is the relative offset to an instruction that is allowed to fault,
- * and the second is the relative offset at which the program should
- * continue. No registers are modified, so it is entirely up to the
- * continuation code to figure out what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	int insn, fixup;
-};
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>

 #define KERNEL_DS	(-1UL)
 #define get_ds()	(KERNEL_DS)
@@ -357,58 +331,13 @@ do {									\
 })

 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define raw_copy_from_user __arch_copy_from_user
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_to_user __arch_copy_to_user
+extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-
-static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	kasan_check_write(to, n);
-	check_object_size(to, n, false);
-	return __arch_copy_from_user(to, from, n);
-}
-
-static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	kasan_check_read(from, n);
-	check_object_size(from, n, true);
-	return __arch_copy_to_user(to, from, n);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long res = n;
-	kasan_check_write(to, n);
-	check_object_size(to, n, false);
-
-	if (access_ok(VERIFY_READ, from, n)) {
-		res = __arch_copy_from_user(to, from, n);
-	}
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	kasan_check_read(from, n);
-	check_object_size(from, n, true);
-
-	if (access_ok(VERIFY_WRITE, to, n)) {
-		n = __arch_copy_to_user(to, from, n);
-	}
-	return n;
-}
-
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
-		n = __copy_in_user(to, from, n);
-	return n;
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER

 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {

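raw_copy_in_user() follows the same convention as the other raw helpers: the caller is responsible for access_ok() on both ranges, and the return value is the number of bytes not copied. The generic copy_in_user() wrapper that sits on top of it looks roughly like this (a sketch, not the literal kernel text):

	static inline unsigned long
	copy_in_user(void __user *to, const void __user *from, unsigned long n)
	{
		if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
			n = raw_copy_in_user(to, from, n);
		return n;
	}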
+ 1 - 1
arch/arm64/kernel/arm64ksyms.c

@@ -38,7 +38,7 @@ EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);

 	/* physical memory */
 EXPORT_SYMBOL(memstart_addr);

+ 2 - 2
arch/arm64/lib/copy_in_user.S

@@ -64,14 +64,14 @@
 	.endm

 end	.req	x5
-ENTRY(__copy_in_user)
+ENTRY(raw_copy_in_user)
 	uaccess_enable_not_uao x3, x4
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3
 	mov	x0, #0
 	ret
-ENDPROC(__copy_in_user)
+ENDPROC(raw_copy_in_user)

 	.section .fixup,"ax"
 	.align	2

+ 1 - 0
arch/avr32/include/asm/Kbuild

@@ -5,6 +5,7 @@ generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += futex.h
 generic-y += irq_regs.h
 generic-y += irq_work.h

+ 7 - 32
arch/avr32/include/asm/uaccess.h

@@ -8,12 +8,6 @@
 #ifndef __ASM_AVR32_UACCESS_H
 #ifndef __ASM_AVR32_UACCESS_H
 #define __ASM_AVR32_UACCESS_H
 #define __ASM_AVR32_UACCESS_H
 
 
-#include <linux/errno.h>
-#include <linux/sched.h>
-
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 typedef struct {
 typedef struct {
 	unsigned int is_user_space;
 	unsigned int is_user_space;
 } mm_segment_t;
 } mm_segment_t;
@@ -72,34 +66,18 @@ static inline void set_fs(mm_segment_t s)
 extern __kernel_size_t __copy_user(void *to, const void *from,
 extern __kernel_size_t __copy_user(void *to, const void *from,
 				   __kernel_size_t n);
 				   __kernel_size_t n);
 
 
-extern __kernel_size_t copy_to_user(void __user *to, const void *from,
-				    __kernel_size_t n);
-extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
-				      __kernel_size_t n);
-
-static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
-					     __kernel_size_t n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 {
 	return __copy_user((void __force *)to, from, n);
 	return __copy_user((void __force *)to, from, n);
 }
 }
-static inline __kernel_size_t __copy_from_user(void *to,
-					       const void __user *from,
-					       __kernel_size_t n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 {
 	return __copy_user(to, (const void __force *)from, n);
 	return __copy_user(to, (const void __force *)from, n);
 }
 }
-static inline __kernel_size_t copy_from_user(void *to,
-					       const void __user *from,
-					       __kernel_size_t n)
-{
-	size_t res = ___copy_from_user(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
 /*
 /*
  * put_user: - Write a simple value into user space.
  * put_user: - Write a simple value into user space.
@@ -329,9 +307,6 @@ extern long __strnlen_user(const char __user *__s, long __n);
 
 
 #define strlen_user(s) strnlen_user(s, ~0UL >> 1)
 #define strlen_user(s) strnlen_user(s, ~0UL >> 1)
 
 
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
+#include <asm/extable.h>
 
 
 #endif /* __ASM_AVR32_UACCESS_H */
 #endif /* __ASM_AVR32_UACCESS_H */

+ 0 - 2
arch/avr32/kernel/avr32_ksyms.c

@@ -36,8 +36,6 @@ EXPORT_SYMBOL(copy_page);
 /*
  * Userspace access stuff.
  */
-EXPORT_SYMBOL(___copy_from_user);
-EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);

+ 0 - 15
arch/avr32/lib/copy_user.S

@@ -23,21 +23,6 @@
 	 */
 	.text
 	.align	1
-	.global	___copy_from_user
-	.type	___copy_from_user, @function
-___copy_from_user:
-	branch_if_kernel r8, __copy_user
-	ret_if_privileged r8, r11, r10, r10
-	rjmp	__copy_user
-	.size	___copy_from_user, . - ___copy_from_user
-
-	.global	copy_to_user
-	.type	copy_to_user, @function
-copy_to_user:
-	branch_if_kernel r8, __copy_user
-	ret_if_privileged r8, r12, r10, r10
-	.size	copy_to_user, . - copy_to_user
-
 	.global	__copy_user
 	.type	__copy_user, @function
 __copy_user:

+ 1 - 0
arch/blackfin/include/asm/Kbuild

@@ -7,6 +7,7 @@ generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += futex.h
 generic-y += hw_irq.h

+ 5 - 42
arch/blackfin/include/asm/uaccess.h

@@ -12,7 +12,6 @@
 /*
 /*
  * User space memory access functions
  * User space memory access functions
  */
  */
-#include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/string.h>
 
 
@@ -29,9 +28,6 @@ static inline void set_fs(mm_segment_t fs)
 
 
 #define segment_eq(a, b) ((a) == (b))
 #define segment_eq(a, b) ((a) == (b))
 
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 #define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size))
 #define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size))
 
 
 /*
 /*
@@ -46,22 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size) { return 1;
 extern int _access_ok(unsigned long addr, unsigned long size);
 extern int _access_ok(unsigned long addr, unsigned long size);
 #endif
 #endif
 
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
+#include <asm/extable.h>
 
 
 /*
 /*
  * These are the main single-value transfer routines.  They automatically
  * These are the main single-value transfer routines.  They automatically
@@ -163,41 +144,23 @@ static inline int bad_user_access_length(void)
 		: "a" (__ptr(ptr)));		\
 		: "a" (__ptr(ptr)));		\
 })
 })
 
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
 static inline unsigned long __must_check
 static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 {
 	memcpy(to, (const void __force *)from, n);
 	memcpy(to, (const void __force *)from, n);
 	return 0;
 	return 0;
 }
 }
 
 
 static inline unsigned long __must_check
 static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 {
 	memcpy((void __force *)to, from, n);
 	memcpy((void __force *)to, from, n);
 	SSYNC();
 	SSYNC();
 	return 0;
 	return 0;
 }
 }
 
 
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		return __copy_from_user(to, from, n);
-	memset(to, 0, n);
-	return n;
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_WRITE, to, n)))
-		return __copy_to_user(to, from, n);
-	return n;
-}
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 /*
 /*
  * Copy a null terminated string from userspace.
  * Copy a null terminated string from userspace.
  */
  */

+ 1 - 1
arch/blackfin/kernel/process.c

@@ -370,7 +370,7 @@ int _access_ok(unsigned long addr, unsigned long size)
 	/* Check that things do not wrap around */
 	if (addr > ULONG_MAX - size)
 		return 0;
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (uaccess_kernel())
 		return 1;
 #ifdef CONFIG_MTD_UCLINUX
 	if (1)

+ 1 - 0
arch/c6x/include/asm/Kbuild

@@ -12,6 +12,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += futex.h

+ 6 - 13
arch/c6x/include/asm/uaccess.h

@@ -13,17 +13,11 @@
 #include <linux/compiler.h>
 #include <linux/compiler.h>
 #include <linux/string.h>
 #include <linux/string.h>
 
 
-#ifdef CONFIG_ACCESS_CHECK
-#define __access_ok _access_ok
-#endif
-
 /*
 /*
- * __copy_from_user/copy_to_user are based on ones in asm-generic/uaccess.h
- *
  * C6X supports unaligned 32 and 64 bit loads and stores.
  * C6X supports unaligned 32 and 64 bit loads and stores.
  */
  */
-static inline __must_check long __copy_from_user(void *to,
-		const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 {
 	u32 tmp32;
 	u32 tmp32;
 	u64 tmp64;
 	u64 tmp64;
@@ -58,8 +52,8 @@ static inline __must_check long __copy_from_user(void *to,
 	return 0;
 	return 0;
 }
 }
 
 
-static inline __must_check long __copy_to_user(void __user *to,
-		const void *from, unsigned long n)
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 {
 	u32 tmp32;
 	u32 tmp32;
 	u64 tmp64;
 	u64 tmp64;
@@ -93,9 +87,8 @@ static inline __must_check long __copy_to_user(void __user *to,
 	memcpy((void __force *)to, from, n);
 	memcpy((void __force *)to, from, n);
 	return 0;
 	return 0;
 }
 }
-
-#define __copy_to_user   __copy_to_user
-#define __copy_from_user __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
 extern int _access_ok(unsigned long addr, unsigned long size);
 extern int _access_ok(unsigned long addr, unsigned long size);
 #ifdef CONFIG_ACCESS_CHECK
 #ifdef CONFIG_ACCESS_CHECK

+ 1 - 1
arch/c6x/kernel/sys_c6x.c

@@ -23,7 +23,7 @@ int _access_ok(unsigned long addr, unsigned long size)
 	if (!addr || addr > (0xffffffffUL - (size - 1)))
 		goto _bad_access;

-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (uaccess_kernel())
 		return 1;

 	if (memory_start <= addr && (addr + size - 1) < memory_end)

+ 9 - 22
arch/cris/arch-v10/lib/usercopy.c

@@ -188,11 +188,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
 }
 }
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__copy_user);
 
 
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
-   userland.  The return-value is the number of bytes that were
+/* Copy from user to kernel.  The return-value is the number of bytes that were
    inaccessible.  */
    inaccessible.  */
 
 
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+unsigned long __copy_user_in(void *pdst, const void __user *psrc,
 				  unsigned long pn)
 				  unsigned long pn)
 {
 {
   /* We want the parameters put in special registers.
   /* We want the parameters put in special registers.
@@ -217,19 +216,17 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
     {
     {
       __asm_copy_from_user_1 (dst, src, retn);
       __asm_copy_from_user_1 (dst, src, retn);
       n--;
       n--;
+      if (retn)
+         goto exception;
     }
     }
 
 
     if (((unsigned long) src & 2) && n >= 2)
     if (((unsigned long) src & 2) && n >= 2)
     {
     {
       __asm_copy_from_user_2 (dst, src, retn);
       __asm_copy_from_user_2 (dst, src, retn);
       n -= 2;
       n -= 2;
+      if (retn)
+         goto exception;
     }
     }
-
-    /* We only need one check after the unalignment-adjustments, because
-       if both adjustments were done, either both or neither reference
-       had an exception.  */
-    if (retn != 0)
-      goto copy_exception_bytes;
   }
   }
 
 
   /* Decide which copying method to use. */
   /* Decide which copying method to use. */
@@ -328,7 +325,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
     n -= 4;
     n -= 4;
 
 
     if (retn)
     if (retn)
-      goto copy_exception_bytes;
+      goto exception;
   }
   }
 
 
   /* If we get here, there were no memory read faults.  */
   /* If we get here, there were no memory read faults.  */
@@ -356,20 +353,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
      bytes.  */
      bytes.  */
   return retn;
   return retn;
 
 
-copy_exception_bytes:
-  /* We already have "retn" bytes cleared, and need to clear the
-     remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-     memset is preferred here, since this isn't speed-critical code and
-     we'd rather have this a leaf-function than calling memset.  */
-  {
-    char *endp;
-    for (endp = dst + n; dst < endp; dst++)
-      *dst = 0;
-  }
-
+exception:
   return retn + n;
   return retn + n;
 }
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(__copy_user_in);
 
 
 /* Zero userspace.  */
 /* Zero userspace.  */
 unsigned long __do_clear_user(void __user *pto, unsigned long pn)
 unsigned long __do_clear_user(void __user *pto, unsigned long pn)

+ 9 - 21
arch/cris/arch-v32/lib/usercopy.c

@@ -156,10 +156,9 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
 }
 }
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__copy_user);
 
 
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
-   userland.  The return-value is the number of bytes that were
+/* Copy from user to kernel.  The return-value is the number of bytes that were
    inaccessible.  */
    inaccessible.  */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+unsigned long __copy_user_in(void *pdst, const void __user *psrc,
 				  unsigned long pn)
 				  unsigned long pn)
 {
 {
   /* We want the parameters put in special registers.
   /* We want the parameters put in special registers.
@@ -184,19 +183,18 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
     {
     {
       __asm_copy_from_user_1 (dst, src, retn);
       __asm_copy_from_user_1 (dst, src, retn);
       n--;
       n--;
+      if (retn != 0)
+        goto exception;
     }
     }
 
 
     if (((unsigned long) src & 2) && n >= 2)
     if (((unsigned long) src & 2) && n >= 2)
     {
     {
       __asm_copy_from_user_2 (dst, src, retn);
       __asm_copy_from_user_2 (dst, src, retn);
       n -= 2;
       n -= 2;
+      if (retn != 0)
+        goto exception;
     }
     }
 
 
-    /* We only need one check after the unalignment-adjustments, because
-       if both adjustments were done, either both or neither reference
-       had an exception.  */
-    if (retn != 0)
-      goto copy_exception_bytes;
   }
   }
 
 
   /* Movem is dirt cheap.  The overheap is low enough to always use the
   /* Movem is dirt cheap.  The overheap is low enough to always use the
@@ -279,7 +277,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
     n -= 4;
     n -= 4;
 
 
     if (retn)
     if (retn)
-      goto copy_exception_bytes;
+      goto exception;
   }
   }
 
 
   /* If we get here, there were no memory read faults.  */
   /* If we get here, there were no memory read faults.  */
@@ -307,20 +305,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
      bytes.  */
      bytes.  */
   return retn;
   return retn;
 
 
-copy_exception_bytes:
-  /* We already have "retn" bytes cleared, and need to clear the
-     remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-     memset is preferred here, since this isn't speed-critical code and
-     we'd rather have this a leaf-function than calling memset.  */
-  {
-    char *endp;
-    for (endp = dst + n; dst < endp; dst++)
-      *dst = 0;
-  }
-
+exception:
   return retn + n;
   return retn + n;
 }
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(__copy_user_in);
 
 
 /* Zero userspace.  */
 /* Zero userspace.  */
 unsigned long __do_clear_user(void __user *pto, unsigned long pn)
 unsigned long __do_clear_user(void __user *pto, unsigned long pn)

+ 18 - 28
arch/cris/include/arch-v10/arch/uaccess.h

@@ -172,16 +172,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_user_cont(to, from, ret,	\
 	__asm_copy_user_cont(to, from, ret,	\
 		"	move.b [%1+],$r9\n"	\
 		"	move.b [%1+],$r9\n"	\
 		"2:	move.b $r9,[%0+]\n",	\
 		"2:	move.b $r9,[%0+]\n",	\
-		"3:	addq 1,%2\n"		\
-		"	clear.b [%0+]\n",	\
+		"3:	addq 1,%2\n",		\
 		"	.dword 2b,3b\n")
 		"	.dword 2b,3b\n")
 
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 	__asm_copy_user_cont(to, from, ret,		\
 		"	move.w [%1+],$r9\n"		\
 		"	move.w [%1+],$r9\n"		\
 		"2:	move.w $r9,[%0+]\n" COPY,	\
 		"2:	move.w $r9,[%0+]\n" COPY,	\
-		"3:	addq 2,%2\n"			\
-		"	clear.w [%0+]\n" FIXUP,		\
+		"3:	addq 2,%2\n" FIXUP,		\
 		"	.dword 2b,3b\n" TENTRY)
 		"	.dword 2b,3b\n" TENTRY)
 
 
 #define __asm_copy_from_user_2(to, from, ret) \
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -191,16 +189,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_2x_cont(to, from, ret,	\
 	__asm_copy_from_user_2x_cont(to, from, ret,	\
 		"	move.b [%1+],$r9\n"		\
 		"	move.b [%1+],$r9\n"		\
 		"4:	move.b $r9,[%0+]\n",		\
 		"4:	move.b $r9,[%0+]\n",		\
-		"5:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"5:	addq 1,%2\n",			\
 		"	.dword 4b,5b\n")
 		"	.dword 4b,5b\n")
 
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 	__asm_copy_user_cont(to, from, ret,		\
 		"	move.d [%1+],$r9\n"		\
 		"	move.d [%1+],$r9\n"		\
 		"2:	move.d $r9,[%0+]\n" COPY,	\
 		"2:	move.d $r9,[%0+]\n" COPY,	\
-		"3:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n" FIXUP,		\
+		"3:	addq 4,%2\n" FIXUP,		\
 		"	.dword 2b,3b\n" TENTRY)
 		"	.dword 2b,3b\n" TENTRY)
 
 
 #define __asm_copy_from_user_4(to, from, ret) \
 #define __asm_copy_from_user_4(to, from, ret) \
@@ -210,8 +206,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_4x_cont(to, from, ret,	\
 	__asm_copy_from_user_4x_cont(to, from, ret,	\
 		"	move.b [%1+],$r9\n"		\
 		"	move.b [%1+],$r9\n"		\
 		"4:	move.b $r9,[%0+]\n",		\
 		"4:	move.b $r9,[%0+]\n",		\
-		"5:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"5:	addq 1,%2\n",			\
 		"	.dword 4b,5b\n")
 		"	.dword 4b,5b\n")
 
 
 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -219,7 +214,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.w [%1+],$r9\n"		\
 		"	move.w [%1+],$r9\n"		\
 		"4:	move.w $r9,[%0+]\n" COPY,	\
 		"4:	move.w $r9,[%0+]\n" COPY,	\
 		"5:	addq 2,%2\n"			\
 		"5:	addq 2,%2\n"			\
-		"	clear.w [%0+]\n" FIXUP,		\
+			FIXUP,				\
 		"	.dword 4b,5b\n" TENTRY)
 		"	.dword 4b,5b\n" TENTRY)
 
 
 #define __asm_copy_from_user_6(to, from, ret) \
 #define __asm_copy_from_user_6(to, from, ret) \
@@ -229,8 +224,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_6x_cont(to, from, ret,	\
 	__asm_copy_from_user_6x_cont(to, from, ret,	\
 		"	move.b [%1+],$r9\n"		\
 		"	move.b [%1+],$r9\n"		\
 		"6:	move.b $r9,[%0+]\n",		\
 		"6:	move.b $r9,[%0+]\n",		\
-		"7:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"7:	addq 1,%2\n",			\
 		"	.dword 6b,7b\n")
 		"	.dword 6b,7b\n")
 
 
 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -238,7 +232,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.d [%1+],$r9\n"		\
 		"	move.d [%1+],$r9\n"		\
 		"4:	move.d $r9,[%0+]\n" COPY,	\
 		"4:	move.d $r9,[%0+]\n" COPY,	\
 		"5:	addq 4,%2\n"			\
 		"5:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n" FIXUP,		\
+			FIXUP,				\
 		"	.dword 4b,5b\n" TENTRY)
 		"	.dword 4b,5b\n" TENTRY)
 
 
 #define __asm_copy_from_user_8(to, from, ret) \
 #define __asm_copy_from_user_8(to, from, ret) \
@@ -248,8 +242,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_8x_cont(to, from, ret,	\
 	__asm_copy_from_user_8x_cont(to, from, ret,	\
 		"	move.b [%1+],$r9\n"		\
 		"	move.b [%1+],$r9\n"		\
 		"6:	move.b $r9,[%0+]\n",		\
 		"6:	move.b $r9,[%0+]\n",		\
-		"7:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"7:	addq 1,%2\n",			\
 		"	.dword 6b,7b\n")
 		"	.dword 6b,7b\n")
 
 
 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -257,7 +250,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.w [%1+],$r9\n"		\
 		"	move.w [%1+],$r9\n"		\
 		"6:	move.w $r9,[%0+]\n" COPY,	\
 		"6:	move.w $r9,[%0+]\n" COPY,	\
 		"7:	addq 2,%2\n"			\
 		"7:	addq 2,%2\n"			\
-		"	clear.w [%0+]\n" FIXUP,		\
+			FIXUP,				\
 		"	.dword 6b,7b\n" TENTRY)
 		"	.dword 6b,7b\n" TENTRY)
 
 
 #define __asm_copy_from_user_10(to, from, ret) \
 #define __asm_copy_from_user_10(to, from, ret) \
@@ -267,8 +260,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_10x_cont(to, from, ret,	\
 	__asm_copy_from_user_10x_cont(to, from, ret,	\
 		"	move.b [%1+],$r9\n"		\
 		"	move.b [%1+],$r9\n"		\
 		"8:	move.b $r9,[%0+]\n",		\
 		"8:	move.b $r9,[%0+]\n",		\
-		"9:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"9:	addq 1,%2\n",			\
 		"	.dword 8b,9b\n")
 		"	.dword 8b,9b\n")
 
 
 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -276,7 +268,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.d [%1+],$r9\n"		\
 		"	move.d [%1+],$r9\n"		\
 		"6:	move.d $r9,[%0+]\n" COPY,	\
 		"6:	move.d $r9,[%0+]\n" COPY,	\
 		"7:	addq 4,%2\n"			\
 		"7:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n" FIXUP,		\
+			FIXUP,				\
 		"	.dword 6b,7b\n" TENTRY)
 		"	.dword 6b,7b\n" TENTRY)
 
 
 #define __asm_copy_from_user_12(to, from, ret) \
 #define __asm_copy_from_user_12(to, from, ret) \
@@ -286,8 +278,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_12x_cont(to, from, ret,	\
 	__asm_copy_from_user_12x_cont(to, from, ret,	\
 		"	move.b [%1+],$r9\n"		\
 		"	move.b [%1+],$r9\n"		\
 		"8:	move.b $r9,[%0+]\n",		\
 		"8:	move.b $r9,[%0+]\n",		\
-		"9:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"9:	addq 1,%2\n",			\
 		"	.dword 8b,9b\n")
 		"	.dword 8b,9b\n")
 
 
 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -295,7 +286,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.w [%1+],$r9\n"		\
 		"	move.w [%1+],$r9\n"		\
 		"8:	move.w $r9,[%0+]\n" COPY,	\
 		"8:	move.w $r9,[%0+]\n" COPY,	\
 		"9:	addq 2,%2\n"			\
 		"9:	addq 2,%2\n"			\
-		"	clear.w [%0+]\n" FIXUP,		\
+			FIXUP,				\
 		"	.dword 8b,9b\n" TENTRY)
 		"	.dword 8b,9b\n" TENTRY)
 
 
 #define __asm_copy_from_user_14(to, from, ret) \
 #define __asm_copy_from_user_14(to, from, ret) \
@@ -305,8 +296,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_14x_cont(to, from, ret,	\
 	__asm_copy_from_user_14x_cont(to, from, ret,	\
 		"	move.b [%1+],$r9\n"		\
 		"	move.b [%1+],$r9\n"		\
 		"10:	move.b $r9,[%0+]\n",		\
 		"10:	move.b $r9,[%0+]\n",		\
-		"11:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"11:	addq 1,%2\n",			\
 		"	.dword 10b,11b\n")
 		"	.dword 10b,11b\n")
 
 
 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -314,7 +304,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.d [%1+],$r9\n"		\
 		"	move.d [%1+],$r9\n"		\
 		"8:	move.d $r9,[%0+]\n" COPY,	\
 		"8:	move.d $r9,[%0+]\n" COPY,	\
 		"9:	addq 4,%2\n"			\
 		"9:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n" FIXUP,		\
+			FIXUP,				\
 		"	.dword 8b,9b\n" TENTRY)
 		"	.dword 8b,9b\n" TENTRY)
 
 
 #define __asm_copy_from_user_16(to, from, ret) \
 #define __asm_copy_from_user_16(to, from, ret) \
@@ -325,7 +315,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.d [%1+],$r9\n"		\
 		"	move.d [%1+],$r9\n"		\
 		"10:	move.d $r9,[%0+]\n" COPY,	\
 		"10:	move.d $r9,[%0+]\n" COPY,	\
 		"11:	addq 4,%2\n"			\
 		"11:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n" FIXUP,		\
+			FIXUP,				\
 		"	.dword 10b,11b\n" TENTRY)
 		"	.dword 10b,11b\n" TENTRY)
 
 
 #define __asm_copy_from_user_20(to, from, ret) \
 #define __asm_copy_from_user_20(to, from, ret) \
@@ -336,7 +326,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.d [%1+],$r9\n"		\
 		"	move.d [%1+],$r9\n"		\
 		"12:	move.d $r9,[%0+]\n" COPY,	\
 		"12:	move.d $r9,[%0+]\n" COPY,	\
 		"13:	addq 4,%2\n"			\
 		"13:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n" FIXUP,		\
+			FIXUP,				\
 		"	.dword 12b,13b\n" TENTRY)
 		"	.dword 12b,13b\n" TENTRY)
 
 
 #define __asm_copy_from_user_24(to, from, ret) \
 #define __asm_copy_from_user_24(to, from, ret) \

+ 18 - 36
arch/cris/include/arch-v32/arch/uaccess.h

@@ -178,8 +178,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"2:	move.b [%1+],$acr\n"	\
 		"2:	move.b [%1+],$acr\n"	\
 		"	move.b $acr,[%0+]\n",	\
 		"	move.b $acr,[%0+]\n",	\
 		"3:	addq 1,%2\n"		\
 		"3:	addq 1,%2\n"		\
-		"	jump 1b\n"		\
-		"	clear.b [%0+]\n",	\
+		"	jump 1b\n",		\
 		"	.dword 2b,3b\n")
 		"	.dword 2b,3b\n")
 
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -189,8 +188,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.w $acr,[%0+]\n",		\
 			FIXUP				\
 		"3:	addq 2,%2\n"			\
-		"	jump 1b\n"			\
-		"	clear.w [%0+]\n",		\
+		"	jump 1b\n",			\
 			TENTRY				\
 		"	.dword 2b,3b\n")
 
@@ -201,8 +199,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_2x_cont(to, from, ret,	\
 		"4:	move.b [%1+],$acr\n"		\
 		"	move.b $acr,[%0+]\n",		\
-		"5:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"5:	addq 1,%2\n",			\
 		"	.dword 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -212,8 +209,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"	move.d $acr,[%0+]\n",		\
 			FIXUP				\
 		"3:	addq 4,%2\n"			\
-		"	jump 1b\n"			\
-		"	clear.d [%0+]\n",		\
+		"	jump 1b\n",			\
 			TENTRY				\
 		"	.dword 2b,3b\n")
 
@@ -224,8 +220,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_4x_cont(to, from, ret,	\
 		"4:	move.b [%1+],$acr\n"		\
 		"	move.b $acr,[%0+]\n",		\
-		"5:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"5:	addq 1,%2\n",			\
 		"	.dword 4b,5b\n")
 
 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -234,8 +229,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"4:	move.w [%1+],$acr\n"		\
 		"	move.w $acr,[%0+]\n",		\
 			FIXUP				\
-		"5:	addq 2,%2\n"			\
-		"	clear.w [%0+]\n",		\
+		"5:	addq 2,%2\n",			\
 			TENTRY				\
 		"	.dword 4b,5b\n")
 
@@ -246,8 +240,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_6x_cont(to, from, ret,	\
 		"6:	move.b [%1+],$acr\n"		\
 		"	move.b $acr,[%0+]\n",		\
-		"7:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"7:	addq 1,%2\n",			\
 		"	.dword 6b,7b\n")
 
 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -256,8 +249,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"4:	move.d [%1+],$acr\n"		\
 		"	move.d $acr,[%0+]\n",		\
 			FIXUP				\
-		"5:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n",		\
+		"5:	addq 4,%2\n",			\
 			TENTRY				\
 		"	.dword 4b,5b\n")
 
@@ -268,8 +260,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_8x_cont(to, from, ret,	\
 		"6:	move.b [%1+],$acr\n"		\
 		"	move.b $acr,[%0+]\n",		\
-		"7:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"7:	addq 1,%2\n",			\
 		"	.dword 6b,7b\n")
 
 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -278,8 +269,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"6:	move.w [%1+],$acr\n"		\
 		"	move.w $acr,[%0+]\n",		\
 			FIXUP				\
-		"7:	addq 2,%2\n"			\
-		"	clear.w [%0+]\n",		\
+		"7:	addq 2,%2\n",			\
 			TENTRY				\
 		"	.dword 6b,7b\n")
 
@@ -290,8 +280,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_10x_cont(to, from, ret,	\
 		"8:	move.b [%1+],$acr\n"		\
 		"	move.b $acr,[%0+]\n",		\
-		"9:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"9:	addq 1,%2\n",			\
 		"	.dword 8b,9b\n")
 
 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -300,8 +289,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"6:	move.d [%1+],$acr\n"		\
 		"	move.d $acr,[%0+]\n",		\
 			FIXUP				\
-		"7:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n",		\
+		"7:	addq 4,%2\n",			\
 			TENTRY				\
 		"	.dword 6b,7b\n")
 
@@ -312,8 +300,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_12x_cont(to, from, ret,	\
 		"8:	move.b [%1+],$acr\n"		\
 		"	move.b $acr,[%0+]\n",		\
-		"9:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"9:	addq 1,%2\n",			\
 		"	.dword 8b,9b\n")
 
 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -322,8 +309,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"8:	move.w [%1+],$acr\n"		\
 		"	move.w $acr,[%0+]\n",		\
 			FIXUP				\
-		"9:	addq 2,%2\n"			\
-		"	clear.w [%0+]\n",		\
+		"9:	addq 2,%2\n",			\
 			TENTRY				\
 		"	.dword 8b,9b\n")
 
@@ -334,8 +320,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 	__asm_copy_from_user_14x_cont(to, from, ret,	\
 		"10:	move.b [%1+],$acr\n"		\
 		"	move.b $acr,[%0+]\n",		\
-		"11:	addq 1,%2\n"			\
-		"	clear.b [%0+]\n",		\
+		"11:	addq 1,%2\n",			\
 		"	.dword 10b,11b\n")
 
 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -344,8 +329,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"8:	move.d [%1+],$acr\n"		\
 		"	move.d $acr,[%0+]\n",		\
 			FIXUP				\
-		"9:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n",		\
+		"9:	addq 4,%2\n",			\
 			TENTRY				\
 		"	.dword 8b,9b\n")
 
@@ -358,8 +342,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"10:	move.d [%1+],$acr\n"		\
 		"	move.d $acr,[%0+]\n",		\
 			FIXUP				\
-		"11:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n",		\
+		"11:	addq 4,%2\n",			\
 			TENTRY				\
 		"	.dword 10b,11b\n")
 
@@ -372,8 +355,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
 		"12:	move.d [%1+],$acr\n"		\
 		"	move.d $acr,[%0+]\n",		\
 			FIXUP				\
-		"13:	addq 4,%2\n"			\
-		"	clear.d [%0+]\n",		\
+		"13:	addq 4,%2\n",			\
 			TENTRY				\
 		"	.dword 12b,13b\n")
 

+ 1 - 0
arch/cris/include/asm/Kbuild

@@ -9,6 +9,7 @@ generic-y += device.h
 generic-y += div64.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += emergency-restart.h
 generic-y += fcntl.h
 generic-y += futex.h

+ 12 - 65
arch/cris/include/asm/uaccess.h

@@ -15,15 +15,9 @@
 #ifndef _CRIS_UACCESS_H
 #define _CRIS_UACCESS_H
 
-#ifndef __ASSEMBLY__
-#include <linux/sched.h>
-#include <linux/errno.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 /*
  * The fs value determines whether argument validity checking should be
  * performed or not.  If get_fs() == USER_DS, checking is performed, with
@@ -49,30 +43,14 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
 	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
 
 #include <arch/uaccess.h>
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
+#include <asm/extable.h>
 
 /*
  * These are the main single-value transfer routines.  They automatically
@@ -191,7 +169,7 @@ extern long __get_user_bad(void);
    live in lib/usercopy.c  */
 
 extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
+extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n);
 extern unsigned long __do_clear_user(void __user *to, unsigned long n);
 
 static inline long
@@ -258,7 +236,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 	else if (n == 24)
 		__asm_copy_from_user_24(to, from, ret);
 	else
-		ret = __copy_user_zeroing(to, from, n);
+		ret = __copy_user_in(to, from, n);
 
 	return ret;
 }
@@ -358,64 +336,33 @@ static inline size_t clear_user(void __user *to, size_t n)
 		return __do_clear_user(to, n);
 }
 
-static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (unlikely(!access_ok(VERIFY_READ, from, n))) {
-		memset(to, 0, n);
-		return n;
-	}
 	if (__builtin_constant_p(n))
 		return __constant_copy_from_user(to, from, n);
 	else
-		return __copy_user_zeroing(to, from, n);
+		return __copy_user_in(to, from, n);
 }
 
-static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
-		return n;
 	if (__builtin_constant_p(n))
 		return __constant_copy_to_user(to, from, n);
 	else
 		return __copy_user(to, from, n);
 }
 
-/* We let the __ versions of copy_from/to_user inline, because they're often
- * used in fast paths and have only a small space overhead.
- */
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void __user *from,
-				 unsigned long n)
-{
-	return __copy_user_zeroing(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void __user *to, const void *from,
-			       unsigned long n)
-{
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_clear_user_nocheck(void __user *to, unsigned long n)
+__clear_user(void __user *to, unsigned long n)
 {
 	return __do_clear_user(to, n);
 }
 
-/* without checking */
-
-#define __copy_to_user(to, from, n) \
-	__generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
-	__generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))
-
 #define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
 
-#endif  /* __ASSEMBLY__ */
-
 #endif	/* _CRIS_UACCESS_H */
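
The conversions above follow the pattern used for every architecture in this series: the arch primitive loses its access_ok() check and its tail-zeroing, and only reports how many bytes were left uncopied. A minimal sketch of the generic wrapper that the RAW_COPY_USER machinery layers on top of raw_copy_from_user() (an illustration of the intended behaviour, not a quote of the merged lib/usercopy.c, which also carries instrumentation hooks):

static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* arch hook: no checking, no zeroing */
	if (unlikely(res))
		memset(to + (n - res), 0, res);		/* zero-pad the uncopied tail exactly once */
	return res;
}

With the padding done here, arch code such as the cris __copy_user_in() above no longer needs the clear.b/clear.w/clear.d fixups that the first hunks in this pull remove.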

+ 1 - 0
arch/frv/include/asm/Kbuild

@@ -1,6 +1,7 @@
 
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h

+ 24 - 60
arch/frv/include/asm/uaccess.h

@@ -15,16 +15,13 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/segment.h>
 #include <asm/sections.h>
+#include <asm/extable.h>
 
 #define __ptr(x) ((unsigned long __force *)(x))
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 /*
  * check that a range of addresses falls within the current address limit
  */
@@ -63,26 +60,6 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
 #define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
 #define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
-
 
 /*
  * These are the main single-value transfer routines.  They automatically
@@ -256,61 +233,50 @@ do {							\
 /*
  *
  */
+
 #define ____force(x) (__force void *)(void __user *)(x)
 #ifdef CONFIG_MMU
 extern long __memset_user(void *dst, unsigned long count);
 extern long __memcpy_user(void *dst, const void *src, unsigned long count);
 
 #define __clear_user(dst,count)			__memset_user(____force(dst), (count))
-#define __copy_from_user_inatomic(to, from, n)	__memcpy_user((to), ____force(from), (n))
-#define __copy_to_user_inatomic(to, from, n)	__memcpy_user(____force(to), (from), (n))
 
 #else
 
 #define __clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
-#define __copy_from_user_inatomic(to, from, n)	(memcpy((to), ____force(from), (n)), 0)
-#define __copy_to_user_inatomic(to, from, n)	(memcpy(____force(to), (from), (n)), 0)
 
 #endif
 
-static inline unsigned long __must_check
-clear_user(void __user *to, unsigned long n)
-{
-	if (likely(__access_ok(to, n)))
-		n = __clear_user(to, n);
-	return n;
-}
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       might_fault();
-       return __copy_to_user_inatomic(to, from, n);
-}
-
 static inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       might_fault();
-       return __copy_from_user_inatomic(to, from, n);
+#ifdef CONFIG_MMU
+	return __memcpy_user(to, (__force const void *)from, n);
+#else
+	memcpy(to, (__force const void *)from, n);
+	return 0;
+#endif
 }
 
-static inline long copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	unsigned long ret = n;
-
-	if (likely(__access_ok(from, n)))
-		ret = __copy_from_user(to, from, n);
-
-	if (unlikely(ret != 0))
-		memset(to + (n - ret), 0, ret);
-
-	return ret;
+#ifdef CONFIG_MMU
+	return __memcpy_user((__force void *)to, from, n);
+#else
+	memcpy((__force void *)to, from, n);
+	return 0;
+#endif
 }
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
 
-static inline long copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __must_check
+clear_user(void __user *to, unsigned long n)
 {
-	return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n;
+	if (likely(__access_ok(to, n)))
+		n = __clear_user(to, n);
+	return n;
 }
 
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
@@ -318,6 +284,4 @@ extern long strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long search_exception_table(unsigned long addr);
-
 #endif /* _ASM_UACCESS_H */

+ 1 - 6
arch/frv/kernel/traps.c

@@ -360,13 +360,8 @@ asmlinkage void memory_access_exception(unsigned long esr0,
 	siginfo_t info;
 
 #ifdef CONFIG_MMU
-	unsigned long fixup;
-
-	fixup = search_exception_table(__frame->pc);
-	if (fixup) {
-		__frame->pc = fixup;
+	if (fixup_exception(__frame))
 		return;
-	}
 #endif
 
 	die_if_kernel("-- Memory Access Exception --\n"

+ 13 - 14
arch/frv/mm/extable.c

@@ -10,40 +10,39 @@ extern const void __memset_end, __memset_user_error_lr, __memset_user_error_hand
 extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
 extern spinlock_t modlist_lock;
 
-
-/*****************************************************************************/
-/*
- * see if there's a fixup handler available to deal with a kernel fault
- */
-unsigned long search_exception_table(unsigned long pc)
+int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *extab;
+	unsigned long pc = regs->pc;
 
 	/* determine if the fault lay during a memcpy_user or a memset_user */
-	if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
+	if (regs->lr == (unsigned long) &__memset_user_error_lr &&
 	    (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end
 	    ) {
 		/* the fault occurred in a protected memset
 		 * - we search for the return address (in LR) instead of the program counter
 		 * - it was probably during a clear_user()
 		 */
-		return (unsigned long) &__memset_user_error_handler;
+		regs->pc = (unsigned long) &__memset_user_error_handler;
+		return 1;
 	}
 
-	if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
+	if (regs->lr == (unsigned long) &__memcpy_user_error_lr &&
 	    (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
 	    ) {
 		/* the fault occurred in a protected memset
 		 * - we search for the return address (in LR) instead of the program counter
 		 * - it was probably during a copy_to/from_user()
 		 */
-		return (unsigned long) &__memcpy_user_error_handler;
+		regs->pc = (unsigned long) &__memcpy_user_error_handler;
+		return 1;
 	}
 
 	extab = search_exception_tables(pc);
-	if (extab)
-		return extab->fixup;
+	if (extab) {
+		regs->pc = extab->fixup;
+		return 1;
+	}
 
 	return 0;
-
-} /* end search_exception_table() */
+}

+ 2 - 4
arch/frv/mm/fault.c

@@ -33,7 +33,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
-	unsigned long _pme, lrai, lrad, fixup;
+	unsigned long _pme, lrai, lrad;
 	unsigned long flags = 0;
 	siginfo_t info;
 	pgd_t *pge;
@@ -201,10 +201,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 
  no_context:
 	/* are we prepared to handle this kernel fault? */
-	if ((fixup = search_exception_table(__frame->pc)) != 0) {
-		__frame->pc = fixup;
+	if (fixup_exception(__frame))
 		return;
-	}
 
 /*
  * Oops. The kernel tried to access some bad page. We'll have to

+ 1 - 1
arch/h8300/include/asm/Kbuild

@@ -13,6 +13,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += ftrace.h
@@ -68,7 +69,6 @@ generic-y += tlbflush.h
 generic-y += trace_clock.h
 generic-y += topology.h
 generic-y += types.h
-generic-y += uaccess.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h

+ 54 - 0
arch/h8300/include/asm/uaccess.h

@@ -0,0 +1,54 @@
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+
+#include <linux/string.h>
+
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		switch(n) {
+		case 1:
+			*(u8 *)to = *(u8 __force *)from;
+			return 0;
+		case 2:
+			*(u16 *)to = *(u16 __force *)from;
+			return 0;
+		case 4:
+			*(u32 *)to = *(u32 __force *)from;
+			return 0;
+		}
+	}
+
+	memcpy(to, (const void __force *)from, n);
+	return 0;
+}
+
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		switch(n) {
+		case 1:
+			*(u8 __force *)to = *(u8 *)from;
+			return 0;
+		case 2:
+			*(u16 __force *)to = *(u16 *)from;
+			return 0;
+		case 4:
+			*(u32 __force *)to = *(u32 *)from;
+			return 0;
+		default:
+			break;
+		}
+	}
+
+	memcpy((void __force *)to, from, n);
+	return 0;
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+#include <asm-generic/uaccess.h>
+
+#endif
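
Callers never use these hooks directly; whatever an architecture provides underneath, the call-site idiom stays the usual one, where a non-zero return value is the number of bytes that could not be copied (hypothetical example, not part of this patch):

	struct foo_args args;	/* hypothetical structure being fetched from userspace */

	if (copy_from_user(&args, uptr, sizeof(args)))
		return -EFAULT;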

+ 1 - 0
arch/hexagon/include/asm/Kbuild

@@ -11,6 +11,7 @@ generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += ftrace.h

+ 10 - 16
arch/hexagon/include/asm/uaccess.h

@@ -23,7 +23,6 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/segment.h>
 #include <asm/sections.h>
@@ -50,8 +49,6 @@
  * reasonably simple and not *too* slow.  After all, we've got the
  * MMU for backup.
  */
-#define VERIFY_READ     0
-#define VERIFY_WRITE    1
 
 #define __access_ok(addr, size) \
 	((get_fs().seg == KERNEL_DS.seg) || \
@@ -68,19 +65,12 @@
  */
 
 /*  Assembly somewhat optimized copy routines  */
-unsigned long __copy_from_user_hexagon(void *to, const void __user *from,
+unsigned long raw_copy_from_user(void *to, const void __user *from,
 				     unsigned long n);
-unsigned long __copy_to_user_hexagon(void __user *to, const void *from,
+unsigned long raw_copy_to_user(void __user *to, const void *from,
 				   unsigned long n);
-
-#define __copy_from_user(to, from, n) __copy_from_user_hexagon(to, from, n)
-#define __copy_to_user(to, from, n) __copy_to_user_hexagon(to, from, n)
-
-/*
- * XXX todo: some additonal performance gain is possible by
- * implementing __copy_to/from_user_inatomic, which is much
- * like __copy_to/from_user, but performs slightly less checking.
- */
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
 #define __clear_user(a, s) __clear_user_hexagon((a), (s))
@@ -107,10 +97,14 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
 		return -EFAULT;
 
 	if (res > n) {
-		copy_from_user(dst, src, n);
+		long left = raw_copy_from_user(dst, src, n);
+		if (unlikely(left))
+			memset(dst + (n - left), 0, left);
 		return n;
 	} else {
-		copy_from_user(dst, src, res);
+		long left = raw_copy_from_user(dst, src, res);
+		if (unlikely(left))
+			memset(dst + (res - left), 0, left);
 		return res-1;
 	}
 }

+ 2 - 2
arch/hexagon/kernel/hexagon_ksyms.c

@@ -25,8 +25,8 @@
 
 /* Additional functions */
 EXPORT_SYMBOL(__clear_user_hexagon);
-EXPORT_SYMBOL(__copy_from_user_hexagon);
-EXPORT_SYMBOL(__copy_to_user_hexagon);
+EXPORT_SYMBOL(raw_copy_from_user);
+EXPORT_SYMBOL(raw_copy_to_user);
 EXPORT_SYMBOL(__iounmap);
 EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__vmgetie);

+ 1 - 1
arch/hexagon/mm/copy_from_user.S

@@ -44,7 +44,7 @@
 #define bytes r2
 #define loopcount r5
 
-#define FUNCNAME __copy_from_user_hexagon
+#define FUNCNAME raw_copy_from_user
 #include "copy_user_template.S"
 
 	/* LOAD FAULTS from COPY_FROM_USER */

+ 1 - 1
arch/hexagon/mm/copy_to_user.S

@@ -43,7 +43,7 @@
 #define bytes r2
 #define loopcount r5
 
-#define FUNCNAME __copy_to_user_hexagon
+#define FUNCNAME raw_copy_to_user
 #include "copy_user_template.S"
 
 	/* STORE FAULTS from COPY_TO_USER */

+ 0 - 1
arch/ia64/Kconfig

@@ -52,7 +52,6 @@ config IA64
 	select MODULES_USE_ELF_RELA
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_AUDITSYSCALL
-	select HAVE_ARCH_HARDENED_USERCOPY
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to

+ 11 - 0
arch/ia64/include/asm/extable.h

@@ -0,0 +1,11 @@
+#ifndef _ASM_IA64_EXTABLE_H
+#define _ASM_IA64_EXTABLE_H
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+struct exception_table_entry {
+	int insn;	/* location-relative address of insn this fixup is for */
+	int fixup;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
+};
+
+#endif
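
With ARCH_HAS_RELATIVE_EXTABLE, the two ints above hold offsets from their own storage location rather than absolute addresses, which halves the table size on 64-bit and keeps it position-independent. A sketch of how such an entry is resolved (illustration only, not the ia64 fixup code itself, which additionally interprets bit 2 of the fixup offset as "set r9 to 0", as the comment notes):

static inline unsigned long ex_insn_addr(const struct exception_table_entry *e)
{
	return (unsigned long)&e->insn + e->insn;	/* offset is relative to the field itself */
}

static inline unsigned long ex_fixup_addr(const struct exception_table_entry *e)
{
	return (unsigned long)&e->fixup + e->fixup;	/* same scheme for the continuation address */
}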

+ 27 - 75
arch/ia64/include/asm/uaccess.h

@@ -33,14 +33,13 @@
  */
 
 #include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
 #include <linux/page-flags.h>
 #include <linux/mm.h>
 
 #include <asm/intrinsics.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
+#include <asm/extable.h>
 
 /*
  * For historical reasons, the following macros are grossly misnamed:
@@ -48,9 +47,6 @@
 #define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
 #define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 #define get_ds()  (KERNEL_DS)
 #define get_fs()  (current_thread_info()->addr_limit)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
@@ -63,14 +59,14 @@
 * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
  * point inside the virtually mapped linear page table.
  */
-#define __access_ok(addr, size, segment)						\
-({											\
-	__chk_user_ptr(addr);								\
-	(likely((unsigned long) (addr) <= (segment).seg)				\
-	 && ((segment).seg == KERNEL_DS.seg						\
-	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
-})
-#define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
+static inline int __access_ok(const void __user *p, unsigned long size)
+{
+	unsigned long addr = (unsigned long)p;
+	unsigned long seg = get_fs().seg;
+	return likely(addr <= seg) &&
+	 (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
+}
+#define access_ok(type, addr, size)	__access_ok((addr), (size))
 
 /*
  * These are the main single-value transfer routines.  They automatically
@@ -80,8 +76,8 @@
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
-#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
-#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
+#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))
 
 /*
  * The "__xxx" versions do not do address space checking, useful when
@@ -184,13 +180,13 @@ extern void __get_user_unknown (void);
 * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate it while
 * using r8/r9.
 */
-#define __do_get_user(check, x, ptr, size, segment)					\
+#define __do_get_user(check, x, ptr, size)						\
 ({											\
 	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
 	__typeof__ (size) __gu_size = (size);						\
 	long __gu_err = -EFAULT;							\
 	unsigned long __gu_val = 0;							\
-	if (!check || __access_ok(__gu_ptr, size, segment))				\
+	if (!check || __access_ok(__gu_ptr, size))					\
 		switch (__gu_size) {							\
 		      case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
 		      case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
@@ -202,8 +198,8 @@ extern void __get_user_unknown (void);
 	__gu_err;									\
 })
 
-#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
-#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)
+#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size)
+#define __get_user_check(x, ptr, size)	__do_get_user(1, x, ptr, size)
 
 extern void __put_user_unknown (void);
 
@@ -211,14 +207,14 @@ extern void __put_user_unknown (void);
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
 * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
 */
-#define __do_put_user(check, x, ptr, size, segment)					\
+#define __do_put_user(check, x, ptr, size)						\
 ({											\
 	__typeof__ (x) __pu_x = (x);							\
 	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
 	__typeof__ (size) __pu_size = (size);						\
 	long __pu_err = -EFAULT;							\
 											\
-	if (!check || __access_ok(__pu_ptr, __pu_size, segment))			\
+	if (!check || __access_ok(__pu_ptr, __pu_size))					\
 		switch (__pu_size) {							\
 		      case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
 		      case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
@@ -229,8 +225,8 @@ extern void __put_user_unknown (void);
 	__pu_err;									\
 })
 
-#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
-#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)
+#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size)
+#define __put_user_check(x, ptr, size)	__do_put_user(1, x, ptr, size)
 
 /*
 * Complex access routines
@@ -239,56 +235,19 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 					       unsigned long count);
 
 static inline unsigned long
-__copy_to_user (void __user *to, const void *from, unsigned long count)
+raw_copy_to_user(void __user *to, const void *from, unsigned long count)
 {
-	check_object_size(from, count, true);
-
 	return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
-__copy_from_user (void *to, const void __user *from, unsigned long count)
+raw_copy_from_user(void *to, const void __user *from, unsigned long count)
 {
-	check_object_size(to, count, false);
-
 	return __copy_user((__force void __user *) to, from, count);
 }
 
-#define __copy_to_user_inatomic		__copy_to_user
-#define __copy_from_user_inatomic	__copy_from_user
-#define copy_to_user(to, from, n)							\
-({											\
-	void __user *__cu_to = (to);							\
-	const void *__cu_from = (from);							\
-	long __cu_len = (n);								\
-											\
-	if (__access_ok(__cu_to, __cu_len, get_fs())) {					\
-		check_object_size(__cu_from, __cu_len, true);			\
-		__cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len);	\
-	}										\
-	__cu_len;									\
-})
-
-static inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	check_object_size(to, n, false);
-	if (likely(__access_ok(from, n, get_fs())))
-		n = __copy_user((__force void __user *) to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
-
-static inline unsigned long
-copy_in_user (void __user *to, const void __user *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
-		n = __copy_user(to, from, n);
-	return n;
-}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 extern unsigned long __do_clear_user (void __user *, unsigned long);
 
@@ -297,7 +256,7 @@ extern unsigned long __do_clear_user (void __user *, unsigned long);
 #define clear_user(to, n)					\
 ({								\
 	unsigned long __cu_len = (n);				\
-	if (__access_ok(to, __cu_len, get_fs()))		\
+	if (__access_ok(to, __cu_len))				\
 		__cu_len = __do_clear_user(to, __cu_len);	\
 	__cu_len;						\
 })
@@ -313,7 +272,7 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from,
 ({									\
 	const char __user * __sfu_from = (from);			\
 	long __sfu_ret = -EFAULT;					\
-	if (__access_ok(__sfu_from, 0, get_fs()))			\
+	if (__access_ok(__sfu_from, 0))					\
 		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
 	__sfu_ret;							\
 })
@@ -325,7 +284,7 @@ extern unsigned long __strlen_user (const char __user *);
 ({							\
 	const char __user *__su_str = (str);		\
 	unsigned long __su_ret = 0;			\
-	if (__access_ok(__su_str, 0, get_fs()))		\
+	if (__access_ok(__su_str, 0))			\
 		__su_ret = __strlen_user(__su_str);	\
 	__su_ret;					\
 })
@@ -341,18 +300,11 @@ extern unsigned long __strnlen_user (const char __user *, long);
 ({								\
 	const char __user *__su_str = (str);			\
 	unsigned long __su_ret = 0;				\
-	if (__access_ok(__su_str, 0, get_fs()))			\
+	if (__access_ok(__su_str, 0))				\
 		__su_ret = __strnlen_user(__su_str, len);	\
 	__su_ret;						\
 })
 
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-struct exception_table_entry {
-	int insn;	/* location-relative address of insn this fixup is for */
-	int fixup;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
-};
-
 #define ARCH_HAS_TRANSLATE_MEM_PTR	1
 static __inline__ void *
 xlate_dev_mem_ptr(phys_addr_t p)

+ 1 - 12
arch/ia64/lib/memcpy_mck.S

@@ -556,9 +556,6 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
 #define D	r22
 #define F	r28
 
-#define memset_arg0	r32
-#define memset_arg2	r33
-
 #define saved_retval	loc0
 #define saved_rtlink	loc1
 #define saved_pfs_stack	loc2
@@ -622,7 +619,7 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
 * 	(faulting_addr - orig_dst)	-> len to faulting st address
 * B =	(cur_dst - orig_dst)		-> len copied so far
 * C =	A - B				-> len need to be copied
- * D =	orig_len - A			-> len need to be zeroed
+ * D =	orig_len - A			-> len need to be left along
 */
 (p6)	sub	A = F, saved_in0
 (p7)	sub	A = F, saved_in1
@@ -638,9 +635,6 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
 	sub	D = saved_in2, A
 	;;
 	cmp.gt	p8,p0=C,r0		// more than 1 byte?
-	add	memset_arg0=saved_in0, A
-(p6)	mov	memset_arg2=0		// copy_to_user should not call memset
-(p7)	mov	memset_arg2=D		// copy_from_user need to have kbuf zeroed
 	mov	r8=0
 	mov	saved_retval = D
 	mov	saved_rtlink = b0
@@ -652,11 +646,6 @@ EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
 	;;
 
 	add	saved_retval=saved_retval,r8	// above might return non-zero value
-	cmp.gt	p8,p0=memset_arg2,r0	// more than 1 byte?
-	mov	out0=memset_arg0	// *s
-	mov	out1=r0			// c
-	mov	out2=memset_arg2	// n
-(p8)	br.call.sptk.few b0=memset
 	;;
 
 	mov	retval=saved_retval

+ 4 - 1
arch/ia64/mm/extable.c

@@ -5,7 +5,10 @@
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
 
-#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/extable.h>
+#include <asm/errno.h>
+#include <asm/processor.h>
 
 void
 ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)

+ 1 - 0
arch/m32r/include/asm/Kbuild

@@ -2,6 +2,7 @@
 generic-y += clkdev.h
 generic-y += current.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += irq_work.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h

+ 9 - 180
arch/m32r/include/asm/uaccess.h

@@ -11,13 +11,9 @@
 /*
  * User space memory access functions
  */
-#include <linux/errno.h>
-#include <linux/thread_info.h>
 #include <asm/page.h>
 #include <asm/setup.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
+#include <linux/prefetch.h>
 
 /*
  * The fs value determines whether argument validity checking should be
@@ -114,25 +110,7 @@ static inline int access_ok(int type, const void *addr, unsigned long size)
 }
 #endif /* CONFIG_MMU */
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 /*
  * These are the main single-value transfer routines.  They automatically
@@ -483,174 +461,25 @@ do {									\
 		: "r14", "memory");					\
 } while (0)
 
-#define __copy_user_zeroing(to, from, size)				\
-do {									\
-	unsigned long __dst, __src, __c;				\
-	__asm__ __volatile__ (						\
-		"	mv	r14, %0\n"				\
-		"	or	r14, %1\n"				\
-		"	beq	%0, %1, 9f\n"				\
-		"	beqz	%2, 9f\n"				\
-		"	and3	r14, r14, #3\n"				\
-		"	bnez	r14, 2f\n"				\
-		"	and3	%2, %2, #3\n"				\
-		"	beqz	%3, 2f\n"				\
-		"	addi	%0, #-4		; word_copy \n"		\
-		"	.fillinsn\n"					\
-		"0:	ld	r14, @%1+\n"				\
-		"	addi	%3, #-1\n"				\
-		"	.fillinsn\n"					\
-		"1:	st	r14, @+%0\n"				\
-		"	bnez	%3, 0b\n"				\
-		"	beqz	%2, 9f\n"				\
-		"	addi	%0, #4\n"				\
-		"	.fillinsn\n"					\
-		"2:	ldb	r14, @%1	; byte_copy \n"		\
-		"	.fillinsn\n"					\
-		"3:	stb	r14, @%0\n"				\
-		"	addi	%1, #1\n"				\
-		"	addi	%2, #-1\n"				\
-		"	addi	%0, #1\n"				\
-		"	bnez	%2, 2b\n"				\
-		"	.fillinsn\n"					\
-		"9:\n"							\
-		".section .fixup,\"ax\"\n"				\
-		"	.balign 4\n"					\
-		"5:	addi	%3, #1\n"				\
-		"	addi	%1, #-4\n"				\
-		"	.fillinsn\n"					\
-		"6:	slli	%3, #2\n"				\
-		"	add	%2, %3\n"				\
-		"	addi	%0, #4\n"				\
-		"	.fillinsn\n"					\
-		"7:	ldi	r14, #0		; store zero \n"	\
-		"	.fillinsn\n"					\
-		"8:	addi	%2, #-1\n"				\
-		"	stb	r14, @%0	; ACE? \n"		\
-		"	addi	%0, #1\n"				\
-		"	bnez	%2, 8b\n"				\
-		"	seth	r14, #high(9b)\n"			\
-		"	or3	r14, r14, #low(9b)\n"			\
-		"	jmp	r14\n"					\
-		".previous\n"						\
-		".section __ex_table,\"a\"\n"				\
-		"	.balign 4\n"					\
-		"	.long 0b,6b\n"					\
-		"	.long 1b,5b\n"					\
-		"	.long 2b,7b\n"					\
-		"	.long 3b,7b\n"					\
-		".previous\n"						\
-		: "=&r" (__dst), "=&r" (__src), "=&r" (size),		\
-		  "=&r" (__c)						\
-		: "0" (to), "1" (from), "2" (size), "3" (size / 4)	\
-		: "r14", "memory");					\
-} while (0)
-
-
 /* We let the __ versions of copy_from/to_user inline, because they're often
  * used in fast paths and have only a small space overhead.
  */
-static inline unsigned long __generic_copy_from_user_nocheck(void *to,
-	const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	__copy_user_zeroing(to, from, n);
+	prefetchw(to);
+	__copy_user(to, from, n);
 	return n;
 }
 
-static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
-	const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	prefetch(from);
 	__copy_user(to, from, n);
 	return n;
 }
 
-unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
-unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
-
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define __copy_to_user(to, from, n)			\
-	__generic_copy_to_user_nocheck((to), (from), (n))
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define copy_to_user(to, from, n)			\
-({							\
-	might_fault();					\
-	__generic_copy_to_user((to), (from), (n));	\
-})
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking. * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define __copy_from_user(to, from, n)			\
-	__generic_copy_from_user_nocheck((to), (from), (n))
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define copy_from_user(to, from, n)			\
-({							\
-	might_fault();					\
-	__generic_copy_from_user((to), (from), (n));	\
-})
-
 long __must_check strncpy_from_user(char *dst, const char __user *src,
 				long count);
 long __must_check __strncpy_from_user(char *dst,

+ 0 - 2
arch/m32r/kernel/m32r_ksyms.c

@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(clear_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__generic_copy_from_user);
-EXPORT_SYMBOL(__generic_copy_to_user);
 EXPORT_SYMBOL(strnlen_user);
 
 #ifdef CONFIG_SMP

+ 0 - 21
arch/m32r/lib/usercopy.c

@@ -11,27 +11,6 @@
 #include <linux/thread_info.h>
 #include <linux/uaccess.h>
 
-unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	prefetch(from);
-	if (access_ok(VERIFY_WRITE, to, n))
-		__copy_user(to,from,n);
-	return n;
-}
-
-unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	prefetchw(to);
-	if (access_ok(VERIFY_READ, from, n))
-		__copy_user_zeroing(to,from,n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-
 /*
  * Copy a null terminated string from userspace.
  */

+ 1 - 0
arch/m68k/include/asm/Kbuild

@@ -5,6 +5,7 @@ generic-y += device.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += futex.h
 generic-y += hw_irq.h
 generic-y += ioctl.h

+ 0 - 10
arch/m68k/include/asm/processor.h

@@ -122,16 +122,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 	wrusp(usp);
 }
 
-#ifdef CONFIG_MMU
-extern int handle_kernel_fault(struct pt_regs *regs);
-#else
-static inline  int handle_kernel_fault(struct pt_regs *regs)
-{
-	/* Any fault in kernel is fatal on non-mmu */
-	return 0;
-}
-#endif
-
 /* Forward declaration, a strange C thing */
 struct task_struct;
 

+ 1 - 0
arch/m68k/include/asm/uaccess.h

@@ -4,6 +4,7 @@
 #include <asm/uaccess_mm.h>
 #endif
 
+#include <asm/extable.h>
 #ifdef CONFIG_CPU_HAS_NO_UNALIGNED
 #include <asm-generic/uaccess-unaligned.h>
 #else

+ 49 - 54
arch/m68k/include/asm/uaccess_mm.h

@@ -5,14 +5,9 @@
  * User space memory access functions
  */
 #include <linux/compiler.h>
-#include <linux/errno.h>
 #include <linux/types.h>
-#include <linux/sched.h>
 #include <asm/segment.h>
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 /* We let the MMU do all checking */
 static inline int access_ok(int type, const void __user *addr,
 			    unsigned long size)
@@ -36,24 +31,6 @@ static inline int access_ok(int type, const void __user *addr,
 #define	MOVES	"move"
 #endif
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
 extern int __put_user_bad(void);
 extern int __get_user_bad(void);
 
@@ -202,39 +179,55 @@ asm volatile ("\n"					\
 unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
 unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
 
-#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
+#define __suffix0
+#define __suffix1 b
+#define __suffix2 w
+#define __suffix4 l
+
+#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
 	asm volatile ("\n"						\
 		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
 		"	move."#s1"	%3,(%1)+\n"			\
+		"	.ifnc	\""#s2"\",\"\"\n"			\
 		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
 		"	move."#s2"	%3,(%1)+\n"			\
 		"	.ifnc	\""#s3"\",\"\"\n"			\
 		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
 		"	move."#s3"	%3,(%1)+\n"			\
 		"	.endif\n"					\
+		"	.endif\n"					\
 		"4:\n"							\
 		"	.section __ex_table,\"a\"\n"			\
 		"	.align	4\n"					\
 		"	.long	1b,10f\n"				\
+		"	.ifnc	\""#s2"\",\"\"\n"			\
 		"	.long	2b,20f\n"				\
 		"	.ifnc	\""#s3"\",\"\"\n"			\
 		"	.long	3b,30f\n"				\
 		"	.endif\n"					\
+		"	.endif\n"					\
 		"	.previous\n"					\
 		"\n"							\
 		"	.section .fixup,\"ax\"\n"			\
 		"	.even\n"					\
-		"10:	clr."#s1"	(%1)+\n"			\
-		"20:	clr."#s2"	(%1)+\n"			\
+		"10:	addq.l #"#n1",%0\n"				\
+		"	.ifnc	\""#s2"\",\"\"\n"			\
+		"20:	addq.l #"#n2",%0\n"				\
 		"	.ifnc	\""#s3"\",\"\"\n"			\
-		"30:	clr."#s3"	(%1)+\n"			\
+		"30:	addq.l #"#n3",%0\n"				\
+		"	.endif\n"					\
 		"	.endif\n"					\
-		"	moveq.l	#"#n",%0\n"				\
 		"	jra	4b\n"					\
 		"	.previous\n"					\
 		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
 		: : "memory")
 
+#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
+	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
+#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
+	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,  \
+					__suffix##n1, __suffix##n2, __suffix##n3)
+
 static __always_inline unsigned long
 __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -242,37 +235,37 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 
 	switch (n) {
 	case 1:
-		__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
+		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
 		break;
 	case 2:
-		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2);
+		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
 		break;
 	case 3:
-		__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
+		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
 		break;
 	case 4:
-		__get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
 		break;
 	case 5:
-		__constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
 		break;
 	case 6:
-		__constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
 		break;
 	case 7:
-		__constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
 		break;
 	case 8:
-		__constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
 		break;
 	case 9:
-		__constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
 		break;
 	case 10:
-		__constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
 		break;
 	case 12:
-		__constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
 		break;
 	default:
 		/* we limit the inlined version to 3 moves */
@@ -363,24 +356,26 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 	return res;
 }
 
-#define __copy_from_user(to, from, n)		\
-(__builtin_constant_p(n) ?			\
- __constant_copy_from_user(to, from, n) :	\
- __generic_copy_from_user(to, from, n))
-
-#define __copy_to_user(to, from, n)		\
-(__builtin_constant_p(n) ?			\
- __constant_copy_to_user(to, from, n) :		\
- __generic_copy_to_user(to, from, n))
-
-#define __copy_to_user_inatomic		__copy_to_user
-#define __copy_from_user_inatomic	__copy_from_user
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (__builtin_constant_p(n))
+		return __constant_copy_from_user(to, from, n);
+	return __generic_copy_from_user(to, from, n);
+}
 
 
-#define copy_from_user(to, from, n)	__copy_from_user(to, from, n)
-#define copy_to_user(to, from, n)	__copy_to_user(to, from, n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n))
+		return __constant_copy_to_user(to, from, n);
+	return __generic_copy_to_user(to, from, n);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
 #define user_addr_max() \
-	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+	(uaccess_kernel() ? ~0UL : TASK_SIZE)
 
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
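The m68k conversion above shows the RAW_COPY_USER contract: raw_copy_from_user()/raw_copy_to_user() only move bytes and return how many were left uncopied, while the access_ok() check and the zero-padding of the uncopied tail are done once in generic code. A simplified sketch of such a generic wrapper (illustrative only, not the exact lib/usercopy.c code of this series):

static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);	/* bytes NOT copied */
	if (unlikely(res))
		memset(to + (n - res), 0, res);		/* zero-pad the tail */
	return res;
}

Defining INLINE_COPY_FROM_USER / INLINE_COPY_TO_USER, as the m68k header now does, asks the generic header to emit that wrapper inline instead of relying on the out-of-line copy.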

+ 14 - 29
arch/m68k/include/asm/uaccess_no.h

@@ -4,15 +4,11 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 
 #include <asm/segment.h>
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 #define access_ok(type,addr,size)	_access_ok((unsigned long)(addr),(size))
 
 /*
@@ -26,25 +22,6 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
 	return 1;
 }
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
@@ -124,13 +101,21 @@ extern int __get_user_bad(void);
 		 : "=d" (x)					\
 		 : "m" (*__ptr(ptr)))
 
-#define copy_from_user(to, from, n)		(memcpy(to, from, n), 0)
-#define copy_to_user(to, from, n)		(memcpy(to, from, n), 0)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	memcpy(to, (__force const void *)from, n);
+	return 0;
+}
 
 
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	memcpy((__force void *)to, from, n);
+	return 0;
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
 /*
  * Copy a null terminated string from userspace.

+ 1 - 1
arch/m68k/kernel/signal.c

@@ -88,7 +88,7 @@ static inline int frame_extra_sizes(int f)
 	return frame_size_change[f];
 }
 
-int handle_kernel_fault(struct pt_regs *regs)
+int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
 	struct pt_regs *tregs;

+ 7 - 2
arch/m68k/kernel/traps.c

@@ -1016,8 +1016,13 @@ asmlinkage void trap_c(struct frame *fp)
 			/* traced a trapping instruction on a 68020/30,
 			 * real exception will be executed afterwards.
 			 */
-		} else if (!handle_kernel_fault(&fp->ptregs))
-			bad_super_trap(fp);
+			return;
+		}
+#ifdef CONFIG_MMU
+		if (fixup_exception(&fp->ptregs))
+			return;
+#endif
+		bad_super_trap(fp);
 		return;
 	}
 
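Renaming handle_kernel_fault() to fixup_exception() gives m68k the entry point the generic extable code expects: look up the faulting PC in __ex_table and, if a matching entry exists, resume execution at its fixup address. A rough architecture-neutral sketch of the idea (the real m68k version also has to rewind the exception frame; search_exception_tables() and the instruction_pointer helpers are used here purely for illustration):

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (!fixup)
		return 0;	/* no fixup entry: the fault is fatal */

	instruction_pointer_set(regs, fixup->fixup);
	return 1;	/* continue at the fixup code */
}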

+ 3 - 9
arch/m68k/lib/uaccess.c

@@ -30,19 +30,13 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from,
 		"6:\n"
 		"6:\n"
 		"	.section .fixup,\"ax\"\n"
 		"	.section .fixup,\"ax\"\n"
 		"	.even\n"
 		"	.even\n"
-		"10:	move.l	%0,%3\n"
-		"7:	clr.l	(%2)+\n"
-		"	subq.l	#1,%3\n"
-		"	jne	7b\n"
-		"	lsl.l	#2,%0\n"
+		"10:	lsl.l	#2,%0\n"
 		"	btst	#1,%5\n"
 		"	btst	#1,%5\n"
 		"	jeq	8f\n"
 		"	jeq	8f\n"
-		"30:	clr.w	(%2)+\n"
-		"	addq.l	#2,%0\n"
+		"30:	addq.l	#2,%0\n"
 		"8:	btst	#0,%5\n"
 		"8:	btst	#0,%5\n"
 		"	jeq	6b\n"
 		"	jeq	6b\n"
-		"50:	clr.b	(%2)+\n"
-		"	addq.l	#1,%0\n"
+		"50:	addq.l	#1,%0\n"
 		"	jra	6b\n"
 		"	jra	6b\n"
 		"	.previous\n"
 		"	.previous\n"
 		"\n"
 		"\n"

+ 1 - 1
arch/m68k/mm/fault.c

@@ -32,7 +32,7 @@ int send_fault_sig(struct pt_regs *regs)
 		force_sig_info(siginfo.si_signo,
 			       &siginfo, current);
 	} else {
-		if (handle_kernel_fault(regs))
+		if (fixup_exception(regs))
 			return -1;
 
 		//if (siginfo.si_signo == SIGBUS)

+ 1 - 0
arch/metag/include/asm/Kbuild

@@ -8,6 +8,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += futex.h

+ 4 - 56
arch/metag/include/asm/uaccess.h

@@ -4,10 +4,6 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
-
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
 
 /*
  * The fs value determines whether argument validity checking should be
@@ -28,7 +24,7 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 /*
  * Explicitly allow NULL pointers here. Parts of the kernel such
  * as readv/writev use access_ok to validate pointers, but want
@@ -51,28 +47,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),	\
 						(unsigned long)(size))
 
-static inline int verify_area(int type, const void *addr, unsigned long size)
-{
-	return access_ok(type, addr, size) ? 0 : -EFAULT;
-}
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 /*
  * These are the main single-value transfer routines.  They automatically
@@ -199,35 +174,8 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 extern unsigned long raw_copy_from_user(void *to, const void __user *from,
 					unsigned long n);
-
-static inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long res = n;
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = raw_copy_from_user(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
-#define __copy_from_user_inatomic __copy_from_user
-
-extern unsigned long __must_check __copy_user(void __user *to,
-					      const void *from,
-					      unsigned long n);
-
-static inline unsigned long copy_to_user(void __user *to, const void *from,
-					 unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
-}
-
-#define __copy_to_user(to, from, n) __copy_user(to, from, n)
-#define __copy_to_user_inatomic __copy_to_user
+extern unsigned long raw_copy_to_user(void __user *to, const void *from,
+				      unsigned long n);
 
 /*
  * Zero Userspace

+ 3 - 3
arch/metag/lib/usercopy.c

@@ -548,8 +548,8 @@
 		"SUB	%1,	%1,	D0Ar2\n"			\
 		"SUB	%1,	%1,	D0Ar2\n"			\
 		"SUB	%3, %3, D1Ar1\n")
 		"SUB	%3, %3, D1Ar1\n")
 
 
-unsigned long __copy_user(void __user *pdst, const void *psrc,
-			  unsigned long n)
+unsigned long raw_copy_to_user(void __user *pdst, const void *psrc,
+			       unsigned long n)
 {
 {
 	register char __user *dst asm ("A0.2") = pdst;
 	register char __user *dst asm ("A0.2") = pdst;
 	register const char *src asm ("A1.2") = psrc;
 	register const char *src asm ("A1.2") = psrc;
@@ -654,7 +654,7 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	 */
 	return retn;
 }
-EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(raw_copy_to_user);
 
 #define __asm_copy_from_user_1(to, from, ret) \
 	__asm_copy_user_cont(to, from, ret,	\

+ 1 - 0
arch/microblaze/include/asm/Kbuild

@@ -3,6 +3,7 @@ generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += device.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h

+ 9 - 55
arch/microblaze/include/asm/uaccess.h

@@ -11,22 +11,15 @@
 #ifndef _ASM_MICROBLAZE_UACCESS_H
 #ifndef _ASM_MICROBLAZE_UACCESS_H
 #define _ASM_MICROBLAZE_UACCESS_H
 #define _ASM_MICROBLAZE_UACCESS_H
 
 
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
 #include <linux/kernel.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h> /* RLIMIT_FSIZE */
 #include <linux/mm.h>
 #include <linux/mm.h>
 
 
 #include <asm/mmu.h>
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/pgtable.h>
+#include <asm/extable.h>
 #include <linux/string.h>
 #include <linux/string.h>
 
 
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
 /*
 /*
  * On Microblaze the fs value is actually the top of the corresponding
  * On Microblaze the fs value is actually the top of the corresponding
  * address space.
  * address space.
@@ -55,22 +48,6 @@
 
 
 # define segment_eq(a, b)	((a).seg == (b).seg)
 # define segment_eq(a, b)	((a).seg == (b).seg)
 
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
 #ifndef CONFIG_MMU
 #ifndef CONFIG_MMU
 
 
 /* Check against bounds of physical memory */
 /* Check against bounds of physical memory */
@@ -359,39 +336,19 @@ extern long __user_bad(void);
 	__gu_err;							\
 	__gu_err;							\
 })
 })
 
 
-
-/* copy_to_from_user */
-#define __copy_from_user(to, from, n)	\
-	__copy_tofrom_user((__force void __user *)(to), \
-				(void __user *)(from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
-		__copy_from_user((to), (from), (n))
-
-static inline long copy_from_user(void *to,
-		const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 {
-	unsigned long res = n;
-	might_fault();
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = __copy_from_user(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
+	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 }
 
 
-#define __copy_to_user(to, from, n)	\
-		__copy_tofrom_user((void __user *)(to), \
-			(__force const void __user *)(from), (n))
-#define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n))
-
-static inline long copy_to_user(void __user *to,
-		const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 {
-	might_fault();
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_to_user(to, from, n);
-	return n;
+	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 }
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
 /*
 /*
  * Copy a null terminated string from userspace.
  * Copy a null terminated string from userspace.
@@ -422,7 +379,4 @@ static inline long strnlen_user(const char __user *src, long n)
 	return __strnlen_user(src, n);
 	return __strnlen_user(src, n);
 }
 }
 
 
-#endif  /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
 #endif /* _ASM_MICROBLAZE_UACCESS_H */
 #endif /* _ASM_MICROBLAZE_UACCESS_H */

+ 0 - 1
arch/mips/Kconfig

@@ -68,7 +68,6 @@ config MIPS
 	select HANDLE_DOMAIN_IRQ
 	select HANDLE_DOMAIN_IRQ
 	select HAVE_EXIT_THREAD
 	select HAVE_EXIT_THREAD
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_ARCH_HARDENED_USERCOPY
 
 
 menu "Machine selection"
 menu "Machine selection"
 
 

+ 1 - 30
arch/mips/cavium-octeon/octeon-memcpy.S

@@ -139,15 +139,6 @@
 	.set	noreorder
 	.set	noreorder
 	.set	noat
 	.set	noat
 
 
-/*
- * t7 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
-	b	__copy_user_common
-	 li	t7, 1
-	END(__copy_user_inatomic)
-
 /*
 /*
  * A combined memcpy/__copy_user
  * A combined memcpy/__copy_user
  * __copy_user sets len to 0 for success; else to an upper bound of
  * __copy_user sets len to 0 for success; else to an upper bound of
@@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy)
 __memcpy:
 __memcpy:
 FEXPORT(__copy_user)
 FEXPORT(__copy_user)
 EXPORT_SYMBOL(__copy_user)
 EXPORT_SYMBOL(__copy_user)
-	li	t7, 0				/* not inatomic */
-__copy_user_common:
 	/*
 	/*
 	 * Note: dst & src may be unaligned, len may be 0
 	 * Note: dst & src may be unaligned, len may be 0
 	 * Temps
 	 * Temps
@@ -414,25 +403,7 @@ l_exc:
 	LOAD	t0, TI_TASK($28)
 	LOAD	t0, TI_TASK($28)
 	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	SUB	len, AT, t0		# len number of uncopied bytes
 	SUB	len, AT, t0		# len number of uncopied bytes
-	bnez	t7, 2f		/* Skip the zeroing out part if inatomic */
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *   See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0			# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	beqz	len, done
-	 SUB	src, len, 1
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-	bnez	src, 1b
-	 SUB	src, src, 1
-2:	jr	ra
+	jr	ra
 	 nop
 	 nop
 
 
 
 

+ 2 - 2
arch/mips/include/asm/checksum.h

@@ -50,7 +50,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 				   __wsum sum, int *err_ptr)
 				   __wsum sum, int *err_ptr)
 {
 {
 	might_fault();
 	might_fault();
-	if (segment_eq(get_fs(), get_ds()))
+	if (uaccess_kernel())
 		return __csum_partial_copy_kernel((__force void *)src, dst,
 		return __csum_partial_copy_kernel((__force void *)src, dst,
 						  len, sum, err_ptr);
 						  len, sum, err_ptr);
 	else
 	else
@@ -82,7 +82,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
 {
 {
 	might_fault();
 	might_fault();
 	if (access_ok(VERIFY_WRITE, dst, len)) {
 	if (access_ok(VERIFY_WRITE, dst, len)) {
-		if (segment_eq(get_fs(), get_ds()))
+		if (uaccess_kernel())
 			return __csum_partial_copy_kernel(src,
 			return __csum_partial_copy_kernel(src,
 							  (__force void *)dst,
 							  (__force void *)dst,
 							  len, sum, err_ptr);
 							  len, sum, err_ptr);

+ 2 - 2
arch/mips/include/asm/r4kcache.h

@@ -20,7 +20,7 @@
 #include <asm/cpu-features.h>
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
 #include <asm/mipsmtregs.h>
-#include <linux/uaccess.h> /* for segment_eq() */
+#include <linux/uaccess.h> /* for uaccess_kernel() */
 
 
 extern void (*r4k_blast_dcache)(void);
 extern void (*r4k_blast_dcache)(void);
 extern void (*r4k_blast_icache)(void);
 extern void (*r4k_blast_icache)(void);
@@ -714,7 +714,7 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
 									\
 									\
 	__##pfx##flush_prologue						\
 	__##pfx##flush_prologue						\
 									\
 									\
-	if (segment_eq(get_fs(), USER_DS)) {				\
+	if (!uaccess_kernel()) {					\
 		while (1) {						\
 		while (1) {						\
 			protected_cachee_op(hitop, addr);		\
 			protected_cachee_op(hitop, addr);		\
 			if (addr == aend)				\
 			if (addr == aend)				\

+ 60 - 389
arch/mips/include/asm/uaccess.h

@@ -12,8 +12,6 @@
 #define _ASM_UACCESS_H
 #define _ASM_UACCESS_H
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/thread_info.h>
 #include <linux/string.h>
 #include <linux/string.h>
 #include <asm/asm-eva.h>
 #include <asm/asm-eva.h>
 #include <asm/extable.h>
 #include <asm/extable.h>
@@ -71,9 +69,6 @@ extern u64 __ua_limit;
 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
 #endif
 #endif
 
 
-#define VERIFY_READ    0
-#define VERIFY_WRITE   1
-
 #define get_ds()	(KERNEL_DS)
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current_thread_info()->addr_limit)
 #define get_fs()	(current_thread_info()->addr_limit)
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
@@ -93,7 +88,7 @@ static inline bool eva_kernel_access(void)
 	if (!IS_ENABLED(CONFIG_EVA))
 	if (!IS_ENABLED(CONFIG_EVA))
 		return false;
 		return false;
 
 
-	return segment_eq(get_fs(), get_ds());
+	return uaccess_kernel();
 }
 }
 
 
 /*
 /*
@@ -133,23 +128,14 @@ static inline bool eva_kernel_access(void)
  * this function, memory access functions may still return -EFAULT.
  * this function, memory access functions may still return -EFAULT.
  */
  */
 
 
-#define __access_mask get_fs().seg
-
-#define __access_ok(addr, size, mask)					\
-({									\
-	unsigned long __addr = (unsigned long) (addr);			\
-	unsigned long __size = size;					\
-	unsigned long __mask = mask;					\
-	unsigned long __ok;						\
-									\
-	__chk_user_ptr(addr);						\
-	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
-		__ua_size(__size)));					\
-	__ok == 0;							\
-})
+static inline int __access_ok(const void __user *p, unsigned long size)
+{
+	unsigned long addr = (unsigned long)p;
+	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+}
 
 
 #define access_ok(type, addr, size)					\
 #define access_ok(type, addr, size)					\
-	likely(__access_ok((addr), (size), __access_mask))
+	likely(__access_ok((addr), (size)))
 
 
 /*
 /*
  * put_user: - Write a simple value into user space.
  * put_user: - Write a simple value into user space.
@@ -811,157 +797,7 @@ extern void __put_user_unaligned_unknown(void);
 
 
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 
 
-#ifndef CONFIG_EVA
-#define __invoke_copy_to_user(to, from, n)				\
-({									\
-	register void __user *__cu_to_r __asm__("$4");			\
-	register const void *__cu_from_r __asm__("$5");			\
-	register long __cu_len_r __asm__("$6");				\
-									\
-	__cu_to_r = (to);						\
-	__cu_from_r = (from);						\
-	__cu_len_r = (n);						\
-	__asm__ __volatile__(						\
-	__MODULE_JAL(__copy_user)					\
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
-	:								\
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
-	  DADDI_SCRATCH, "memory");					\
-	__cu_len_r;							\
-})
-
-#define __invoke_copy_to_kernel(to, from, n)				\
-	__invoke_copy_to_user(to, from, n)
-
-#endif
-
-/*
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:	  Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:	  Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define __copy_to_user(to, from, n)					\
-({									\
-	void __user *__cu_to;						\
-	const void *__cu_from;						\
-	long __cu_len;							\
-									\
-	__cu_to = (to);							\
-	__cu_from = (from);						\
-	__cu_len = (n);							\
-									\
-	check_object_size(__cu_from, __cu_len, true);			\
-	might_fault();							\
-									\
-	if (eva_kernel_access())					\
-		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
-						   __cu_len);		\
-	else								\
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
-						 __cu_len);		\
-	__cu_len;							\
-})
-
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
-
-#define __copy_to_user_inatomic(to, from, n)				\
-({									\
-	void __user *__cu_to;						\
-	const void *__cu_from;						\
-	long __cu_len;							\
-									\
-	__cu_to = (to);							\
-	__cu_from = (from);						\
-	__cu_len = (n);							\
-									\
-	check_object_size(__cu_from, __cu_len, true);			\
-									\
-	if (eva_kernel_access())					\
-		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
-						   __cu_len);		\
-	else								\
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
-						 __cu_len);		\
-	__cu_len;							\
-})
-
-#define __copy_from_user_inatomic(to, from, n)				\
-({									\
-	void *__cu_to;							\
-	const void __user *__cu_from;					\
-	long __cu_len;							\
-									\
-	__cu_to = (to);							\
-	__cu_from = (from);						\
-	__cu_len = (n);							\
-									\
-	check_object_size(__cu_to, __cu_len, false);			\
-									\
-	if (eva_kernel_access())					\
-		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
-							      __cu_from,\
-							      __cu_len);\
-	else								\
-		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
-							    __cu_from,	\
-							    __cu_len);	\
-	__cu_len;							\
-})
-
-/*
- * copy_to_user: - Copy a block of data into user space.
- * @to:	  Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:	  Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define copy_to_user(to, from, n)					\
-({									\
-	void __user *__cu_to;						\
-	const void *__cu_from;						\
-	long __cu_len;							\
-									\
-	__cu_to = (to);							\
-	__cu_from = (from);						\
-	__cu_len = (n);							\
-									\
-	check_object_size(__cu_from, __cu_len, true);			\
-									\
-	if (eva_kernel_access()) {					\
-		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
-						   __cu_from,		\
-						   __cu_len);		\
-	} else {							\
-		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
-			might_fault();                                  \
-			__cu_len = __invoke_copy_to_user(__cu_to,	\
-							 __cu_from,	\
-							 __cu_len);     \
-		}							\
-	}								\
-	__cu_len;							\
-})
-
-#ifndef CONFIG_EVA
-
-#define __invoke_copy_from_user(to, from, n)				\
+#define __invoke_copy_from(func, to, from, n)				\
 ({									\
 ({									\
 	register void *__cu_to_r __asm__("$4");				\
 	register void *__cu_to_r __asm__("$4");				\
 	register const void __user *__cu_from_r __asm__("$5");		\
 	register const void __user *__cu_from_r __asm__("$5");		\
@@ -972,7 +808,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r = (n);						\
 	__cu_len_r = (n);						\
 	__asm__ __volatile__(						\
 	__asm__ __volatile__(						\
 	".set\tnoreorder\n\t"						\
 	".set\tnoreorder\n\t"						\
-	__MODULE_JAL(__copy_user)					\
+	__MODULE_JAL(func)						\
 	".set\tnoat\n\t"						\
 	".set\tnoat\n\t"						\
 	__UA_ADDU "\t$1, %1, %2\n\t"					\
 	__UA_ADDU "\t$1, %1, %2\n\t"					\
 	".set\tat\n\t"							\
 	".set\tat\n\t"							\
@@ -984,33 +820,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r;							\
 	__cu_len_r;							\
 })
 })
 
 
-#define __invoke_copy_from_kernel(to, from, n)				\
-	__invoke_copy_from_user(to, from, n)
-
-/* For userland <-> userland operations */
-#define ___invoke_copy_in_user(to, from, n)				\
-	__invoke_copy_from_user(to, from, n)
-
-/* For kernel <-> kernel operations */
-#define ___invoke_copy_in_kernel(to, from, n)				\
-	__invoke_copy_from_user(to, from, n)
-
-#define __invoke_copy_from_user_inatomic(to, from, n)			\
+#define __invoke_copy_to(func, to, from, n)				\
 ({									\
 ({									\
-	register void *__cu_to_r __asm__("$4");				\
-	register const void __user *__cu_from_r __asm__("$5");		\
+	register void __user *__cu_to_r __asm__("$4");			\
+	register const void *__cu_from_r __asm__("$5");			\
 	register long __cu_len_r __asm__("$6");				\
 	register long __cu_len_r __asm__("$6");				\
 									\
 									\
 	__cu_to_r = (to);						\
 	__cu_to_r = (to);						\
 	__cu_from_r = (from);						\
 	__cu_from_r = (from);						\
 	__cu_len_r = (n);						\
 	__cu_len_r = (n);						\
 	__asm__ __volatile__(						\
 	__asm__ __volatile__(						\
-	".set\tnoreorder\n\t"						\
-	__MODULE_JAL(__copy_user_inatomic)				\
-	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
-	".set\tat\n\t"							\
-	".set\treorder"							\
+	__MODULE_JAL(func)						\
 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
 	:								\
 	:								\
 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
@@ -1018,228 +838,79 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r;							\
 	__cu_len_r;							\
 })
 })
 
 
-#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
-	__invoke_copy_from_user_inatomic(to, from, n)			\
+#define __invoke_copy_from_kernel(to, from, n)				\
+	__invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_kernel(to, from, n)				\
+	__invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_kernel(to, from, n)				\
+	__invoke_copy_from(__copy_user, to, from, n)
+
+#ifndef CONFIG_EVA
+#define __invoke_copy_from_user(to, from, n)				\
+	__invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_user(to, from, n)				\
+	__invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_user(to, from, n)				\
+	__invoke_copy_from(__copy_user, to, from, n)
 
 
 #else
 #else
 
 
 /* EVA specific functions */
 /* EVA specific functions */
 
 
-extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
-				       size_t __n);
 extern size_t __copy_from_user_eva(void *__to, const void *__from,
 extern size_t __copy_from_user_eva(void *__to, const void *__from,
 				   size_t __n);
 				   size_t __n);
 extern size_t __copy_to_user_eva(void *__to, const void *__from,
 extern size_t __copy_to_user_eva(void *__to, const void *__from,
 				 size_t __n);
 				 size_t __n);
 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 
 
-#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
-({									\
-	register void *__cu_to_r __asm__("$4");				\
-	register const void __user *__cu_from_r __asm__("$5");		\
-	register long __cu_len_r __asm__("$6");				\
-									\
-	__cu_to_r = (to);						\
-	__cu_from_r = (from);						\
-	__cu_len_r = (n);						\
-	__asm__ __volatile__(						\
-	".set\tnoreorder\n\t"						\
-	__MODULE_JAL(func_ptr)						\
-	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
-	".set\tat\n\t"							\
-	".set\treorder"							\
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
-	:								\
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
-	  DADDI_SCRATCH, "memory");					\
-	__cu_len_r;							\
-})
-
-#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
-({									\
-	register void *__cu_to_r __asm__("$4");				\
-	register const void __user *__cu_from_r __asm__("$5");		\
-	register long __cu_len_r __asm__("$6");				\
-									\
-	__cu_to_r = (to);						\
-	__cu_from_r = (from);						\
-	__cu_len_r = (n);						\
-	__asm__ __volatile__(						\
-	__MODULE_JAL(func_ptr)						\
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
-	:								\
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
-	  DADDI_SCRATCH, "memory");					\
-	__cu_len_r;							\
-})
-
 /*
 /*
  * Source or destination address is in userland. We need to go through
  * Source or destination address is in userland. We need to go through
  * the TLB
  * the TLB
  */
  */
 #define __invoke_copy_from_user(to, from, n)				\
 #define __invoke_copy_from_user(to, from, n)				\
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
-
-#define __invoke_copy_from_user_inatomic(to, from, n)			\
-	__invoke_copy_from_user_eva_generic(to, from, n,		\
-					    __copy_user_inatomic_eva)
+	__invoke_copy_from(__copy_from_user_eva, to, from, n)
 
 
 #define __invoke_copy_to_user(to, from, n)				\
 #define __invoke_copy_to_user(to, from, n)				\
-	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+	__invoke_copy_to(__copy_to_user_eva, to, from, n)
 
 
 #define ___invoke_copy_in_user(to, from, n)				\
 #define ___invoke_copy_in_user(to, from, n)				\
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
-
-/*
- * Source or destination address in the kernel. We are not going through
- * the TLB
- */
-#define __invoke_copy_from_kernel(to, from, n)				\
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
-
-#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
-
-#define __invoke_copy_to_kernel(to, from, n)				\
-	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
-
-#define ___invoke_copy_in_kernel(to, from, n)				\
-	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+	__invoke_copy_from(__copy_in_user_eva, to, from, n)
 
 
 #endif /* CONFIG_EVA */
 #endif /* CONFIG_EVA */
 
 
-/*
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:	  Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:	  Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define __copy_from_user(to, from, n)					\
-({									\
-	void *__cu_to;							\
-	const void __user *__cu_from;					\
-	long __cu_len;							\
-									\
-	__cu_to = (to);							\
-	__cu_from = (from);						\
-	__cu_len = (n);							\
-									\
-	check_object_size(__cu_to, __cu_len, false);			\
-									\
-	if (eva_kernel_access()) {					\
-		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
-						     __cu_from,		\
-						     __cu_len);		\
-	} else {							\
-		might_fault();						\
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
-						   __cu_len);		\
-	}								\
-	__cu_len;							\
-})
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return __invoke_copy_to_kernel(to, from, n);
+	else
+		return __invoke_copy_to_user(to, from, n);
+}
 
 
-/*
- * copy_from_user: - Copy a block of data from user space.
- * @to:	  Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:	  Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define copy_from_user(to, from, n)					\
-({									\
-	void *__cu_to;							\
-	const void __user *__cu_from;					\
-	long __cu_len;							\
-									\
-	__cu_to = (to);							\
-	__cu_from = (from);						\
-	__cu_len = (n);							\
-									\
-	check_object_size(__cu_to, __cu_len, false);			\
-									\
-	if (eva_kernel_access()) {					\
-		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
-						     __cu_from,		\
-						     __cu_len);		\
-	} else {							\
-		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
-			might_fault();                                  \
-			__cu_len = __invoke_copy_from_user(__cu_to,	\
-							   __cu_from,	\
-							   __cu_len);   \
-		} else {						\
-			memset(__cu_to, 0, __cu_len);			\
-		}							\
-	}								\
-	__cu_len;							\
-})
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return __invoke_copy_from_kernel(to, from, n);
+	else
+		return __invoke_copy_from_user(to, from, n);
+}
 
 
-#define __copy_in_user(to, from, n)					\
-({									\
-	void __user *__cu_to;						\
-	const void __user *__cu_from;					\
-	long __cu_len;							\
-									\
-	__cu_to = (to);							\
-	__cu_from = (from);						\
-	__cu_len = (n);							\
-	if (eva_kernel_access()) {					\
-		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
-						    __cu_len);		\
-	} else {							\
-		might_fault();						\
-		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
-						  __cu_len);		\
-	}								\
-	__cu_len;							\
-})
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
-#define copy_in_user(to, from, n)					\
-({									\
-	void __user *__cu_to;						\
-	const void __user *__cu_from;					\
-	long __cu_len;							\
-									\
-	__cu_to = (to);							\
-	__cu_from = (from);						\
-	__cu_len = (n);							\
-	if (eva_kernel_access()) {					\
-		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\
-						    __cu_len);		\
-	} else {							\
-		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
-			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
-			might_fault();					\
-			__cu_len = ___invoke_copy_in_user(__cu_to,	\
-							  __cu_from,	\
-							  __cu_len);	\
-		}							\
-	}								\
-	__cu_len;							\
-})
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return ___invoke_copy_in_kernel(to, from, n);
+	else
+		return ___invoke_copy_in_user(to, from,	n);
+}
 
 
 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
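On MIPS the same split applies to user-to-user copies: raw_copy_in_user() above only dispatches to the right low-level routine (EVA or legacy), and the address validation lives in the generic copy_in_user() wrapper. A minimal sketch of that wrapper (illustrative, not the exact generic implementation added by this series):

static inline unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = raw_copy_in_user(to, from, n);	/* returns bytes not copied */
	return n;
}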

+ 12 - 12
arch/mips/kernel/mips-r2-to-r6-emul.c

@@ -1200,7 +1200,7 @@ fpu_emul:
 	case lwl_op:
 	case lwl_op:
 		rt = regs->regs[MIPSInst_RT(inst)];
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			err = SIGSEGV;
 			break;
 			break;
@@ -1273,7 +1273,7 @@ fpu_emul:
 	case lwr_op:
 	case lwr_op:
 		rt = regs->regs[MIPSInst_RT(inst)];
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			err = SIGSEGV;
 			break;
 			break;
@@ -1347,7 +1347,7 @@ fpu_emul:
 	case swl_op:
 	case swl_op:
 		rt = regs->regs[MIPSInst_RT(inst)];
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			err = SIGSEGV;
 			break;
 			break;
@@ -1417,7 +1417,7 @@ fpu_emul:
 	case swr_op:
 	case swr_op:
 		rt = regs->regs[MIPSInst_RT(inst)];
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			err = SIGSEGV;
 			break;
 			break;
@@ -1492,7 +1492,7 @@ fpu_emul:
 
 
 		rt = regs->regs[MIPSInst_RT(inst)];
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			err = SIGSEGV;
 			break;
 			break;
@@ -1611,7 +1611,7 @@ fpu_emul:
 
 
 		rt = regs->regs[MIPSInst_RT(inst)];
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			err = SIGSEGV;
 			break;
 			break;
@@ -1730,7 +1730,7 @@ fpu_emul:
 
 
 		rt = regs->regs[MIPSInst_RT(inst)];
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			err = SIGSEGV;
 			break;
 			break;
@@ -1848,7 +1848,7 @@ fpu_emul:
 
 
 		rt = regs->regs[MIPSInst_RT(inst)];
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			err = SIGSEGV;
 			break;
 			break;
@@ -1965,7 +1965,7 @@ fpu_emul:
 			err = SIGBUS;
 			err = SIGBUS;
 			break;
 			break;
 		}
 		}
-		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGBUS;
 			err = SIGBUS;
 			break;
 			break;
@@ -2021,7 +2021,7 @@ fpu_emul:
 			err = SIGBUS;
 			err = SIGBUS;
 			break;
 			break;
 		}
 		}
-		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGBUS;
 			err = SIGBUS;
 			break;
 			break;
@@ -2084,7 +2084,7 @@ fpu_emul:
 			err = SIGBUS;
 			err = SIGBUS;
 			break;
 			break;
 		}
 		}
-		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGBUS;
 			err = SIGBUS;
 			break;
 			break;
@@ -2145,7 +2145,7 @@ fpu_emul:
 			err = SIGBUS;
 			err = SIGBUS;
 			break;
 			break;
 		}
 		}
-		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGBUS;
 			err = SIGBUS;
 			break;
 			break;

+ 1 - 1
arch/mips/kernel/syscall.c

@@ -98,7 +98,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 	if (unlikely(addr & 3))
 	if (unlikely(addr & 3))
 		return -EINVAL;
 		return -EINVAL;
 
 
-	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
+	if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4)))
 		return -EINVAL;
 		return -EINVAL;
 
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 	if (cpu_has_llsc && R10000_LLSC_WAR) {

+ 5 - 5
arch/mips/kernel/unaligned.c

@@ -1026,7 +1026,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 			goto sigbus;
 
 
 		if (IS_ENABLED(CONFIG_EVA)) {
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadHW(addr, value, res);
 				LoadHW(addr, value, res);
 			else
 			else
 				LoadHWE(addr, value, res);
 				LoadHWE(addr, value, res);
@@ -1045,7 +1045,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 			goto sigbus;
 
 
 		if (IS_ENABLED(CONFIG_EVA)) {
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadW(addr, value, res);
 				LoadW(addr, value, res);
 			else
 			else
 				LoadWE(addr, value, res);
 				LoadWE(addr, value, res);
@@ -1064,7 +1064,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 			goto sigbus;
 
 
 		if (IS_ENABLED(CONFIG_EVA)) {
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadHWU(addr, value, res);
 				LoadHWU(addr, value, res);
 			else
 			else
 				LoadHWUE(addr, value, res);
 				LoadHWUE(addr, value, res);
@@ -1132,7 +1132,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 		value = regs->regs[insn.i_format.rt];
 		value = regs->regs[insn.i_format.rt];
 
 
 		if (IS_ENABLED(CONFIG_EVA)) {
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				StoreHW(addr, value, res);
 				StoreHW(addr, value, res);
 			else
 			else
 				StoreHWE(addr, value, res);
 				StoreHWE(addr, value, res);
@@ -1152,7 +1152,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 		value = regs->regs[insn.i_format.rt];
 		value = regs->regs[insn.i_format.rt];
 
 
 		if (IS_ENABLED(CONFIG_EVA)) {
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				StoreW(addr, value, res);
 				StoreW(addr, value, res);
 			else
 			else
 				StoreWE(addr, value, res);
 				StoreWE(addr, value, res);

+ 0 - 49
arch/mips/lib/memcpy.S

@@ -562,39 +562,9 @@
 	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	 nop
 	 nop
 	SUB	len, AT, t0		# len number of uncopied bytes
 	SUB	len, AT, t0		# len number of uncopied bytes
-	bnez	t6, .Ldone\@	/* Skip the zeroing part if inatomic */
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *   See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0			# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	.set	reorder				/* DADDI_WAR */
-	SUB	src, len, 1
-	beqz	len, .Ldone\@
-	.set	noreorder
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-	bnez	src, 1b
-	 SUB	src, src, 1
-#else
-	.set	push
-	.set	noat
-	li	v1, 1
-	bnez	src, 1b
-	 SUB	src, src, v1
-	.set	pop
-#endif
 	jr	ra
 	jr	ra
 	 nop
 	 nop
 
 
-
 #define SEXC(n)							\
 #define SEXC(n)							\
 	.set	reorder;			/* DADDI_WAR */ \
 	.set	reorder;			/* DADDI_WAR */ \
 .Ls_exc_p ## n ## u\@:						\
 .Ls_exc_p ## n ## u\@:						\
@@ -672,15 +642,6 @@ LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
 	 move	a2, zero
 	 move	a2, zero
 	END(__rmemcpy)
 	END(__rmemcpy)
 
 
-/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
-	b	__copy_user_common
-	li	t6, 1
-	END(__copy_user_inatomic)
-
 /*
 /*
  * A combined memcpy/__copy_user
  * A combined memcpy/__copy_user
  * __copy_user sets len to 0 for success; else to an upper bound of
  * __copy_user sets len to 0 for success; else to an upper bound of
@@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy)
 .L__memcpy:
 .L__memcpy:
 FEXPORT(__copy_user)
 FEXPORT(__copy_user)
 EXPORT_SYMBOL(__copy_user)
 EXPORT_SYMBOL(__copy_user)
-	li	t6, 0	/* not inatomic */
-__copy_user_common:
 	/* Legacy Mode, user <-> user */
 	/* Legacy Mode, user <-> user */
 	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
 	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
 
 
@@ -708,20 +667,12 @@ __copy_user_common:
  * space
  * space
  */
  */
 
 
-LEAF(__copy_user_inatomic_eva)
-EXPORT_SYMBOL(__copy_user_inatomic_eva)
-	b       __copy_from_user_common
-	li	t6, 1
-	END(__copy_user_inatomic_eva)
-
 /*
 /*
  * __copy_from_user (EVA)
  * __copy_from_user (EVA)
  */
  */
 
 
 LEAF(__copy_from_user_eva)
 LEAF(__copy_from_user_eva)
 EXPORT_SYMBOL(__copy_from_user_eva)
 EXPORT_SYMBOL(__copy_from_user_eva)
-	li	t6, 0	/* not inatomic */
-__copy_from_user_common:
 	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
 	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
 END(__copy_from_user_eva)
 END(__copy_from_user_eva)
 
 

+ 1 - 1
arch/mips/oprofile/backtrace.c

@@ -18,7 +18,7 @@ struct stackframe {
 static inline int get_mem(unsigned long addr, unsigned long *result)
 static inline int get_mem(unsigned long addr, unsigned long *result)
 {
 {
 	unsigned long *address = (unsigned long *) addr;
 	unsigned long *address = (unsigned long *) addr;
-	if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
+	if (!access_ok(VERIFY_READ, address, sizeof(unsigned long)))
 		return -1;
 		return -1;
 	if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
 	if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
 		return -3;
 		return -3;

+ 1 - 0
arch/mn10300/include/asm/Kbuild

@@ -2,6 +2,7 @@
 generic-y += barrier.h
 generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += irq_work.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += mm-arch-hooks.h

+ 6 - 181
arch/mn10300/include/asm/uaccess.h

@@ -14,13 +14,8 @@
 /*
 /*
  * User space memory access functions
  * User space memory access functions
  */
  */
-#include <linux/thread_info.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <asm/page.h>
 #include <asm/page.h>
-#include <asm/errno.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
 
 
 /*
 /*
  * The fs value determines whether argument validity checking should be
  * The fs value determines whether argument validity checking should be
@@ -71,26 +66,7 @@ static inline int ___range_ok(unsigned long addr, unsigned int size)
 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
 #define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)
 #define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)
 
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 
 #define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
 #define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
 #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
 #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
@@ -299,170 +275,19 @@ do {									\
 	}								\
 	}								\
 } while (0)
 } while (0)
 
 
-#define __copy_user_zeroing(to, from, size)				\
-do {									\
-	if (size) {							\
-		void *__to = to;					\
-		const void *__from = from;				\
-		int w;							\
-		asm volatile(						\
-			"0:     movbu	(%0),%3;\n"			\
-			"1:     movbu	%3,(%1);\n"			\
-			"	inc	%0;\n"				\
-			"	inc	%1;\n"				\
-			"       add	-1,%2;\n"			\
-			"       bne	0b;\n"				\
-			"2:\n"						\
-			"	.section .fixup,\"ax\"\n"		\
-			"3:\n"						\
-			"	mov	%2,%0\n"			\
-			"	clr	%3\n"				\
-			"4:     movbu	%3,(%1);\n"			\
-			"	inc	%1;\n"				\
-			"       add	-1,%2;\n"			\
-			"       bne	4b;\n"				\
-			"	mov	%0,%2\n"			\
-			"	jmp	2b\n"				\
-			"	.previous\n"				\
-			"	.section __ex_table,\"a\"\n"		\
-			"       .balign	4\n"				\
-			"       .long	0b,3b\n"			\
-			"       .long	1b,3b\n"			\
-			"	.previous\n"				\
-			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
-			: "0"(__from), "1"(__to), "2"(size)		\
-			: "cc", "memory");				\
-	}								\
-} while (0)
-
-/* We let the __ versions of copy_from/to_user inline, because they're often
- * used in fast paths and have only a small space overhead.
- */
-static inline
-unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
-					       unsigned long n)
-{
-	__copy_user_zeroing(to, from, n);
-	return n;
-}
-
-static inline
-unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
-					     unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 {
 	__copy_user(to, from, n);
 	__copy_user(to, from, n);
 	return n;
 	return n;
 }
 }
 
 
-
-#if 0
-#error "don't use - these macros don't increment to & from pointers"
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user(to, from, size)	\
-do {						\
-	asm volatile(				\
-		"       mov %0,a0;\n"		\
-		"0:     movbu (%1),d3;\n"	\
-		"1:     movbu d3,(%2);\n"	\
-		"       add -1,a0;\n"		\
-		"       bne 0b;\n"		\
-		"2:;"				\
-		".section .fixup,\"ax\"\n"	\
-		"3:	jmp 2b\n"		\
-		".previous\n"			\
-		".section __ex_table,\"a\"\n"	\
-		"       .balign 4\n"		\
-		"       .long 0b,3b\n"		\
-		"       .long 1b,3b\n"		\
-		".previous"			\
-		:				\
-		: "d"(size), "d"(to), "d"(from)	\
-		: "d3", "a0");			\
-} while (0)
-
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user_zeroing(to, from, size)	\
-do {							\
-	asm volatile(					\
-		"       mov %0,a0;\n"			\
-		"0:     movbu (%1),d3;\n"		\
-		"1:     movbu d3,(%2);\n"		\
-		"       add -1,a0;\n"			\
-		"       bne 0b;\n"			\
-		"2:;"					\
-		".section .fixup,\"ax\"\n"		\
-		"3:	jmp 2b\n"			\
-		".previous\n"				\
-		".section __ex_table,\"a\"\n"		\
-		"       .balign 4\n"			\
-		"       .long 0b,3b\n"			\
-		"       .long 1b,3b\n"			\
-		".previous"				\
-		:					\
-		: "d"(size), "d"(to), "d"(from)		\
-		: "d3", "a0");				\
-} while (0)
-
-static inline
-unsigned long __constant_copy_to_user(void *to, const void *from,
-				      unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		__constant_copy_user(to, from, n);
-	return n;
-}
-
-static inline
-unsigned long __constant_copy_from_user(void *to, const void *from,
-					unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		__constant_copy_user_zeroing(to, from, n);
-	return n;
-}
-
-static inline
-unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
-					      unsigned long n)
-{
-	__constant_copy_user(to, from, n);
-	return n;
-}
-
-static inline
-unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
-						unsigned long n)
-{
-	__constant_copy_user_zeroing(to, from, n);
+	__copy_user(to, from, n);
 	return n;
 }
-#endif
-
-extern unsigned long __generic_copy_to_user(void __user *, const void *,
-					    unsigned long);
-extern unsigned long __generic_copy_from_user(void *, const void __user *,
-					      unsigned long);
-
-#define __copy_to_user_inatomic(to, from, n) \
-	__generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
-	__generic_copy_from_user_nocheck((to), (from), (n))
-
-#define __copy_to_user(to, from, n)			\
-({							\
-	might_fault();					\
-	__copy_to_user_inatomic((to), (from), (n));	\
-})
-
-#define __copy_from_user(to, from, n)			\
-({							\
-	might_fault();					\
-	__copy_from_user_inatomic((to), (from), (n));	\
-})
-
-
-#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
 
 
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern long __strncpy_from_user(char *dst, const char __user *src, long count);
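
Editor's note: the mn10300 conversion above shows the contract this series standardises on. raw_copy_from_user()/raw_copy_to_user() return the number of bytes that could not be copied (0 on success) and no longer zero the destination tail themselves; the INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER defines seen in later headers ask the generic layer to emit the checking wrapper inline at each call site. A minimal sketch of how that generic wrapper is assumed to sit on top of the arch hook (simplified, not the literal include/linux/uaccess.h code; sketch_copy_from_user is a made-up name):

/* Sketch: the access_ok() check and the tail zeroing happen once, here,
 * in generic code, instead of in every architecture's copy routine. */
static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (access_ok(VERIFY_READ, from, n))
		res = raw_copy_from_user(to, from, n);	/* bytes NOT copied */
	if (unlikely(res))
		memset(to + (n - res), 0, res);		/* zero the unwritten tail */
	return res;
}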

+ 0 - 2
arch/mn10300/kernel/mn10300_ksyms.c

@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(clear_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__generic_copy_from_user);
-EXPORT_SYMBOL(__generic_copy_to_user);
 EXPORT_SYMBOL(strnlen_user);

 extern u64 __ashrdi3(u64, unsigned);

+ 0 - 18
arch/mn10300/lib/usercopy.c

@@ -11,24 +11,6 @@
  */
 #include <linux/uaccess.h>

-unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		__copy_user(to, from, n);
-	return n;
-}
-
-unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		__copy_user_zeroing(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
 /*
  * Copy a null terminated string from userspace.
  */

+ 1 - 0
arch/nios2/include/asm/Kbuild

@@ -13,6 +13,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += ftrace.h

+ 7 - 48
arch/nios2/include/asm/uaccess.h

@@ -13,33 +13,11 @@
 #ifndef _ASM_NIOS2_UACCESS_H
 #define _ASM_NIOS2_UACCESS_H

-#include <linux/errno.h>
-#include <linux/thread_info.h>
 #include <linux/string.h>

 #include <asm/page.h>

-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
-	unsigned long insn;
-	unsigned long fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
 
 
 /*
  * Segment stuff
@@ -95,36 +73,17 @@ static inline unsigned long __must_check clear_user(void __user *to,
 	return __clear_user(to, n);
 }

-extern long __copy_from_user(void *to, const void __user *from,
-				unsigned long n);
-extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
-
-static inline long copy_from_user(void *to, const void __user *from,
-				unsigned long n)
-{
-	unsigned long res = n;
-	if (access_ok(VERIFY_READ, from, n))
-		res = __copy_from_user(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
-}
-
-static inline long copy_to_user(void __user *to, const void *from,
-				unsigned long n)
-{
-	if (!access_ok(VERIFY_WRITE, to, n))
-		return n;
-	return __copy_to_user(to, from, n);
-}
+extern unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
 extern long strncpy_from_user(char *__to, const char __user *__from,
 				long __len);
 extern long strnlen_user(const char __user *s, long n);

-#define __copy_from_user_inatomic	__copy_from_user
-#define __copy_to_user_inatomic		__copy_to_user
-
 /* Optimized macros */
 #define __get_user_asm(val, insn, addr, err)				\
 {									\

+ 8 - 8
arch/nios2/mm/uaccess.c

@@ -10,9 +10,9 @@
 #include <linux/export.h>
 #include <linux/uaccess.h>

-asm(".global	__copy_from_user\n"
-	"   .type __copy_from_user, @function\n"
-	"__copy_from_user:\n"
+asm(".global	raw_copy_from_user\n"
+	"   .type raw_copy_from_user, @function\n"
+	"raw_copy_from_user:\n"
 	"   movi  r2,7\n"
 	"   mov   r3,r4\n"
 	"   bge   r2,r6,1f\n"
@@ -65,12 +65,12 @@ asm(".global	__copy_from_user\n"
 	".word 7b,13b\n"
 	".previous\n"
 	);
-EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 
 asm(
-	"   .global __copy_to_user\n"
-	"   .type __copy_to_user, @function\n"
-	"__copy_to_user:\n"
+	"   .global raw_copy_to_user\n"
+	"   .type raw_copy_to_user, @function\n"
+	"raw_copy_to_user:\n"
 	"   movi  r2,7\n"
 	"   mov   r3,r4\n"
 	"   bge   r2,r6,1f\n"
@@ -127,7 +127,7 @@ asm(
 	".word 11b,13b\n"
 	".word 12b,13b\n"
 	".previous\n");
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_to_user);
 
 
 long strncpy_from_user(char *__to, const char __user *__from, long __len)
 {

+ 1 - 0
arch/openrisc/include/asm/Kbuild

@@ -16,6 +16,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += ftrace.h

+ 8 - 45
arch/openrisc/include/asm/uaccess.h

@@ -22,14 +22,10 @@
 /*
  * User space memory access functions
  */
-#include <linux/errno.h>
-#include <linux/thread_info.h>
 #include <linux/prefetch.h>
 #include <linux/string.h>
 #include <asm/page.h>
-
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
+#include <asm/extable.h>
 
 
 /*
  * The fs value determines whether argument validity checking should be
@@ -65,23 +61,6 @@
 #define access_ok(type, addr, size) \
 	__range_ok((unsigned long)addr, (unsigned long)size)

-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
@@ -257,34 +236,18 @@ do {									\
 
 
 extern unsigned long __must_check
 __copy_tofrom_user(void *to, const void *from, unsigned long size);
-
-#define __copy_from_user(to, from, size) \
-	__copy_tofrom_user(to, from, size)
-#define __copy_to_user(to, from, size) \
-	__copy_tofrom_user(to, from, size)
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
 static inline unsigned long
-copy_from_user(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long res = n;
-
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = __copy_tofrom_user(to, from, n);
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
+	return __copy_tofrom_user(to, (__force const void *)from, size);
 }
-
 static inline unsigned long
-copy_to_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void *to, const void __user *from, unsigned long size)
 {
-	if (likely(access_ok(VERIFY_WRITE, to, n)))
-		n = __copy_tofrom_user(to, from, n);
-	return n;
+	return __copy_tofrom_user((__force void *)to, from, size);
 }
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
 extern unsigned long __clear_user(void *addr, unsigned long size);

@@ -297,7 +260,7 @@ clear_user(void *addr, unsigned long size)
 }

 #define user_addr_max() \
-	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+	(uaccess_kernel() ? ~0UL : TASK_SIZE)
 
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);


+ 0 - 1
arch/parisc/Kconfig

@@ -26,7 +26,6 @@ config PARISC
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_MOD_ARCH_SPECIFIC
-	select HAVE_ARCH_HARDENED_USERCOPY
 	select VIRT_TO_BUS
 	select MODULES_USE_ELF_RELA
 	select CLONE_BACKWARDS

+ 1 - 1
arch/parisc/include/asm/futex.h

@@ -109,7 +109,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
 	 * our gateway page, and causes no end of trouble...
 	 */
-	if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
+	if (uaccess_kernel() && !uaddr)
 		return -EFAULT;

 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
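
Editor's note: this hunk and several below replace open-coded segment_eq(get_fs(), KERNEL_DS) tests with uaccess_kernel(). As used throughout this series the helper is assumed to be nothing more than a shared spelling of that same check, roughly:

/* Assumed definition, equivalent to the open-coded test it replaces. */
#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)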

+ 8 - 61
arch/parisc/include/asm/uaccess.h

@@ -6,15 +6,10 @@
  */
 #include <asm/page.h>
 #include <asm/cache.h>
-#include <asm/errno.h>
 #include <asm-generic/uaccess-unaligned.h>

 #include <linux/bug.h>
 #include <linux/string.h>
-#include <linux/thread_info.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
 
 
 #define KERNEL_DS	((mm_segment_t){0})
 #define USER_DS 	((mm_segment_t){1})
@@ -216,9 +211,6 @@ struct exception_data {
  * Complex access routines -- external declarations
  */

-extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
-extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
-extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
 extern long strncpy_from_user(char *, const char __user *, long);
 extern unsigned lclear_user(void __user *, unsigned long);
 extern long lstrnlen_user(const char __user *, long);
@@ -232,59 +224,14 @@ extern long lstrnlen_user(const char __user *, long);
 #define clear_user lclear_user
 #define __clear_user lclear_user

-unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
-					  unsigned long len);
-unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
-					  unsigned long len);
-unsigned long copy_in_user(void __user *dst, const void __user *src,
-			   unsigned long len);
-#define __copy_in_user copy_in_user
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
-	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(to);
-	unsigned long ret = n;
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(to, n, false);
-		ret = __copy_from_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	if (unlikely(ret))
-		memset(to + (n - ret), 0, ret);
-
-	return ret;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	int sz = __compiletime_object_size(from);
-
-	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(from, n, true);
-		n = __copy_to_user(to, from, n);
-	} else if (!__builtin_constant_p(n))
-		copy_user_overflow(sz, n);
-	else
-		__bad_copy_user();
-
-	return n;
-}
+unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
+					    unsigned long len);
+unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
+					    unsigned long len);
+unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
+					    unsigned long len);
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
 
 
 struct pt_regs;
 int fixup_exception(struct pt_regs *regs);

+ 8 - 8
arch/parisc/lib/memcpy.c

@@ -29,32 +29,32 @@
 
 
 DECLARE_PER_CPU(struct exception_data, exception_data);

-#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
+#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3))
 #define get_kernel_space() (0)

 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */
 extern unsigned long pa_memcpy(void *dst, const void *src,
 				unsigned long len);

-unsigned long __copy_to_user(void __user *dst, const void *src,
-			     unsigned long len)
+unsigned long raw_copy_to_user(void __user *dst, const void *src,
+			       unsigned long len)
 {
 	mtsp(get_kernel_space(), 1);
 	mtsp(get_user_space(), 2);
 	return pa_memcpy((void __force *)dst, src, len);
 }
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_to_user);
 
 
-unsigned long __copy_from_user(void *dst, const void __user *src,
+unsigned long raw_copy_from_user(void *dst, const void __user *src,
 			       unsigned long len)
 {
 	mtsp(get_user_space(), 1);
 	mtsp(get_kernel_space(), 2);
 	return pa_memcpy(dst, (void __force *)src, len);
 }
-EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 
-unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len)
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len)
 {
 	mtsp(get_user_space(), 1);
 	mtsp(get_user_space(), 2);
@@ -70,7 +70,7 @@ void * memcpy(void * dst,const void *src, size_t count)
 	return dst;
 }

-EXPORT_SYMBOL(copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);
 EXPORT_SYMBOL(memcpy);

 long probe_kernel_read(void *dst, const void *src, size_t size)
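
Editor's note: pa_memcpy() already reports the number of bytes not transferred, so the renames above map directly onto the raw_copy_*() return convention; the mtsp() calls merely load the user or kernel space register for each side of the copy. For raw_copy_in_user() the generic wrapper is assumed to look roughly like the sketch below (illustrative, not the actual generic code; sketch_copy_in_user is a made-up name):

/* Sketch: both pointers are userspace pointers, so both are range-checked
 * before deferring to the arch's raw_copy_in_user(). */
static inline unsigned long
sketch_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = raw_copy_in_user(to, from, n);	/* bytes NOT copied */
	return n;
}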

+ 0 - 1
arch/powerpc/Kconfig

@@ -117,7 +117,6 @@ config PPC
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL_OLD
 	select HAVE_ARCH_AUDITSYSCALL
-	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER

+ 29 - 0
arch/powerpc/include/asm/extable.h

@@ -0,0 +1,29 @@
+#ifndef _ARCH_POWERPC_EXTABLE_H
+#define _ARCH_POWERPC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative addresses: the first is
+ * the address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out what
+ * to do.
+ *
+ * All the routines below use bits of fixup code that are out of line with the
+ * main instruction path.  This means when everything is well, we don't even
+ * have to jump over them.  Further, they do not intrude on our cache or tlb
+ * entries.
+ */
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+struct exception_table_entry {
+	int insn;
+	int fixup;
+};
+
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+	return (unsigned long)&x->fixup + x->fixup;
+}
+
+#endif
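
Editor's note: with ARCH_HAS_RELATIVE_EXTABLE each 32-bit field stores an offset from its own address, which keeps the table small and position independent. A hedged sketch of how a fault handler consumes such an entry (the real powerpc fixup_exception() lives elsewhere in arch/powerpc and is not shown in this diff; sketch_fixup_exception is a made-up name):

/* Sketch, assuming the generic search_exception_tables() helper. */
static int sketch_fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(regs->nip);	/* address of the faulting insn */
	if (!e)
		return 0;			/* no fixup found: genuine fault */
	regs->nip = extable_fixup(e);		/* resume at the out-of-line fixup */
	return 1;
}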

+ 10 - 86
arch/powerpc/include/asm/uaccess.h

@@ -1,18 +1,11 @@
 #ifndef _ARCH_POWERPC_UACCESS_H
 #define _ARCH_POWERPC_UACCESS_H

-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/sched.h>
-#include <linux/errno.h>
 #include <asm/asm-compat.h>
 #include <asm/ppc_asm.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-
-#define VERIFY_READ	0
-#define VERIFY_WRITE	1
+#include <asm/extable.h>
 
 
 /*
  * The fs value determines whether argument validity checking should be
@@ -63,31 +56,6 @@
 	(__chk_user_ptr(addr),			\
 	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

-/*
- * The exception table consists of pairs of relative addresses: the first is
- * the address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out what
- * to do.
- *
- * All the routines below use bits of fixup code that are out of line with the
- * main instruction path.  This means when everything is well, we don't even
- * have to jump over them.  Further, they do not intrude on our cache or tlb
- * entries.
- */
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-struct exception_table_entry {
-	int insn;
-	int fixup;
-};
-
-static inline unsigned long extable_fixup(const struct exception_table_entry *x)
-{
-	return (unsigned long)&x->fixup + x->fixup;
-}
-
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
@@ -301,42 +269,19 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 
 
 #ifndef __powerpc64__

-static inline unsigned long copy_from_user(void *to,
-		const void __user *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_READ, from, n))) {
-		check_object_size(to, n, false);
-		return __copy_tofrom_user((__force void __user *)to, from, n);
-	}
-	memset(to, 0, n);
-	return n;
-}
-
-static inline unsigned long copy_to_user(void __user *to,
-		const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n)) {
-		check_object_size(from, n, true);
-		return __copy_tofrom_user(to, (__force void __user *)from, n);
-	}
-	return n;
-}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 
 #else /* __powerpc64__ */

-#define __copy_in_user(to, from, size) \
-	__copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
-				    unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
-				  unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
-				  unsigned long n);
-
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	return __copy_tofrom_user(to, from, n);
+}
 #endif /* __powerpc64__ */

-static inline unsigned long __copy_from_user_inatomic(void *to,
+static inline unsigned long raw_copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 8)) {
@@ -360,12 +305,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 			return 0;
 	}

-	check_object_size(to, n, false);
-
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }

-static inline unsigned long __copy_to_user_inatomic(void __user *to,
+static inline unsigned long raw_copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 8)) {
@@ -389,25 +332,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
 			return 0;
 	}

-	check_object_size(from, n, true);
-
 	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }

-static inline unsigned long __copy_from_user(void *to,
-		const void __user *from, unsigned long size)
-{
-	might_fault();
-	return __copy_from_user_inatomic(to, from, size);
-}
-
-static inline unsigned long __copy_to_user(void __user *to,
-		const void *from, unsigned long size)
-{
-	might_fault();
-	return __copy_to_user_inatomic(to, from, size);
-}
-
 extern unsigned long __clear_user(void __user *addr, unsigned long size);

 static inline unsigned long clear_user(void __user *addr, unsigned long size)
@@ -422,7 +349,4 @@ extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);

-#endif  /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
 #endif	/* _ARCH_POWERPC_UACCESS_H */
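
Editor's note: the powerpc raw_copy_{from,to}_user() above keep a fast path for compile-time-constant sizes of at most 8 bytes, using a single inline __get_user_size()/__put_user_size() access instead of calling __copy_tofrom_user(). A hypothetical caller that would be expected to benefit (illustration only):

/* sizeof(*val) is a constant <= 8, so under the definitions above the copy
 * should reduce to one inline user load (illustrative example). */
static int sketch_read_user_u32(u32 *val, const u32 __user *uptr)
{
	return copy_from_user(val, uptr, sizeof(*val)) ? -EFAULT : 0;
}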

+ 1 - 1
arch/powerpc/lib/Makefile

@@ -14,7 +14,7 @@ obj-y += string.o alloc.o crtsavres.o code-patching.o \
 
 
 obj-$(CONFIG_PPC32)	+= div64.o copy_32.o

-obj64-y	+= copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
+obj64-y	+= copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
 	   copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
 	   memcpy_64.o memcmp_64.o


+ 0 - 14
arch/powerpc/lib/copy_32.S

@@ -477,18 +477,6 @@ _GLOBAL(__copy_tofrom_user)
 	bdnz	130b
 /* then clear out the destination: r3 bytes starting at 4(r6) */
 132:	mfctr	r3
-	srwi.	r0,r3,2
-	li	r9,0
-	mtctr	r0
-	beq	113f
-112:	stwu	r9,4(r6)
-	bdnz	112b
-113:	andi.	r0,r3,3
-	mtctr	r0
-	beq	120f
-114:	stb	r9,4(r6)
-	addi	r6,r6,1
-	bdnz	114b
 120:	blr

 	EX_TABLE(30b,108b)
@@ -497,7 +485,5 @@ _GLOBAL(__copy_tofrom_user)
 	EX_TABLE(41b,111b)
 	EX_TABLE(130b,132b)
 	EX_TABLE(131b,120b)
-	EX_TABLE(112b,120b)
-	EX_TABLE(114b,120b)
 
 
 EXPORT_SYMBOL(__copy_tofrom_user)

Some files were not shown because too many files changed in this diff