@@ -697,43 +697,14 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
 unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					  unsigned n);
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-# define copy_user_diag __compiletime_error
-#else
-# define copy_user_diag __compiletime_warning
-#endif
-
-extern void copy_user_diag("copy_from_user() buffer size is too small")
-copy_from_user_overflow(void);
-extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-
-#undef copy_user_diag
-
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-
-extern void
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
-
-extern void
-__compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
-
-#else
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
 
-static inline void
-__copy_from_user_overflow(int size, unsigned long count)
+static inline void copy_user_overflow(int size, unsigned long count)
 {
 	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }
 
-#define __copy_to_user_overflow __copy_from_user_overflow
-
-#endif
-
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -743,31 +714,13 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
 	kasan_check_write(to, n);
 
-	/*
-	 * While we would like to have the compiler do the checking for us
-	 * even in the non-constant size case, any false positives there are
-	 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
-	 * without - the [hopefully] dangerous looking nature of the warning
-	 * would make people go look at the respecitive call sites over and
-	 * over again just to find that there's no problem).
-	 *
-	 * And there are cases where it's just not realistic for the compiler
-	 * to prove the count to be in range. For example when multiple call
-	 * sites of a helper function - perhaps in different source files -
-	 * all doing proper range checking, yet the helper function not doing
-	 * so again.
-	 *
-	 * Therefore limit the compile time checking to the constant size
-	 * case, and do only runtime checking for non-constant sizes.
-	 */
-
 	if (likely(sz < 0 || sz >= n)) {
 		check_object_size(to, n, false);
 		n = _copy_from_user(to, from, n);
-	} else if (__builtin_constant_p(n))
-		copy_from_user_overflow();
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
 	else
-		__copy_from_user_overflow(sz, n);
+		__bad_copy_user();
 
 	return n;
 }
@@ -781,21 +734,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 
 	might_fault();
 
-	/* See the comment in copy_from_user() above. */
 	if (likely(sz < 0 || sz >= n)) {
 		check_object_size(from, n, true);
 		n = _copy_to_user(to, from, n);
-	} else if (__builtin_constant_p(n))
-		copy_to_user_overflow();
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
 	else
-		__copy_to_user_overflow(sz, n);
+		__bad_copy_user();
 
 	return n;
 }
 
-#undef __copy_from_user_overflow
-#undef __copy_to_user_overflow
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
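
The hunks above collapse four overflow stubs (copy_from_user_overflow, copy_to_user_overflow, and their double-underscore macro variants, all gated on CONFIG_DEBUG_STRICT_USER_COPY_CHECKS) into exactly two paths: copy_user_overflow() WARNs at runtime when the bad size is only known at runtime, while __bad_copy_user(), tagged __compiletime_error, breaks the build when the compiler can prove a constant-size overflow. The sketch below is a minimal userspace approximation of that dispatch, not kernel code; checked_copy(), bad_copy(), report_overflow(), and the file name are hypothetical stand-ins for copy_from_user(), __bad_copy_user(), and copy_user_overflow(). It assumes GCC (or a Clang recent enough to honor __attribute__((error))) and requires optimization, because the hard error relies on the dead bad_copy() call being eliminated whenever the copy is provably safe.

/* Userspace sketch only.  Build with: gcc -O2 -c sketch.c */
#include <stdio.h>
#include <string.h>

/* Never defined; any call surviving optimization fails the build. */
extern void __attribute__((error("copy buffer size is too small")))
bad_copy(void);

/* Runtime analogue of copy_user_overflow()'s WARN(). */
static void report_overflow(int size, unsigned long count)
{
	fprintf(stderr, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static inline unsigned long checked_copy(void *to, const void *from,
					 unsigned long n)
{
	/* -1 when the destination size is unknown at compile time */
	int sz = __builtin_object_size(to, 0);

	if (sz < 0 || (unsigned long)sz >= n)
		memcpy(to, from, n);		/* unknowable or provably safe */
	else if (!__builtin_constant_p(n))
		report_overflow(sz, n);		/* size only known at runtime */
	else
		bad_copy();			/* constant overflow: build error */
	return n;
}

Called as checked_copy(buf, src, 16) against a char buf[4], the bad_copy() branch survives constant folding and compilation aborts with "copy buffer size is too small"; pass the same 16 through a runtime variable and the overflow is instead reported by report_overflow(), mirroring the constant/non-constant split in copy_from_user() above.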