@@ -35,16 +35,20 @@
 #include <asm/compiler.h>
 #include <asm/extable.h>
 
-#define KERNEL_DS	(-1UL)
 #define get_ds()	(KERNEL_DS)
-
-#define USER_DS	TASK_SIZE_64
 #define get_fs()	(current_thread_info()->addr_limit)
 
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
 
+	/*
+	 * Prevent a mispredicted conditional call to set_fs from forwarding
+	 * the wrong address limit to access_ok under speculation.
+	 */
+	dsb(nsh);
+	isb();
+
 	/* On user-mode return, check fs is correct */
 	set_thread_flag(TIF_FSCHECK);
 
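
The barrier pairs with the usual conditional set_fs() idiom. A hypothetical caller, for illustration only (this helper is not part of the patch): if the branch around set_fs(KERNEL_DS) is mispredicted, the dsb(nsh)/isb() sequence inside set_fs() ensures the write to addr_limit has completed before any later access_ok() can speculatively consume a stale limit.

    /* Hypothetical example of the pattern being defended. */
    static int example_read(void *buf, const void __user *ubuf,
                            size_t len, bool from_kernel)
    {
            mm_segment_t old_fs = get_fs();
            int ret;

            if (from_kernel)
                    set_fs(KERNEL_DS);      /* barrier runs in here */

            /* access_ok() inside copy_from_user() sees the committed
             * addr_limit, even on a mispredicted path. */
            ret = copy_from_user(buf, ubuf, len) ? -EFAULT : 0;

            if (from_kernel)
                    set_fs(old_fs);
            return ret;
    }
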
@@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs)
  * Returns 1 if the range is valid, 0 otherwise.
  *
  * This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
  */
-#define __range_ok(addr, size)						\
-({									\
-	unsigned long __addr = (unsigned long)(addr);			\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
-		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (__addr), "Ir" (size),				\
-		  "r" (current_thread_info()->addr_limit)		\
-		: "cc");						\
-	flag;								\
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+	unsigned long limit = current_thread_info()->addr_limit;
+
+	__chk_user_ptr(addr);
+	asm volatile(
+	// A + B <= C + 1 for all A,B,C, in four easy steps:
+	// 1: X = A + B; X' = X % 2^64
+	"	adds	%0, %0, %2\n"
+	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+	"	csel	%1, xzr, %1, hi\n"
+	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+	//    to compensate for the carry flag being set in step 4. For
+	//    X > 2^64, X' merely has to remain nonzero, which it does.
+	"	csinv	%0, %0, xzr, cc\n"
+	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+	//    comes from the carry in being clear. Otherwise, we are
+	//    testing X' - C == 0, subject to the previous adjustments.
+	"	sbcs	xzr, %0, %1\n"
+	"	cset	%0, ls\n"
+	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+	return addr;
+}
 
 /*
  * When dealing with data aborts, watchpoints, or instruction traps we may end
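
The new __range_ok() evaluates "A + B <= C + 1" without needing a 65th bit, as the step-by-step comments describe. Below is a plain-C model of the same predicate, using 128-bit arithmetic to stand in for the missing carry; an illustration to check the logic against, not part of the patch, and the limit is a parameter here rather than being read from current_thread_info():

    #include <stdio.h>

    /* Model of: (u65)addr + (u65)size <= (u65)limit + 1 */
    static unsigned long range_ok_model(unsigned long addr,
                                        unsigned long size,
                                        unsigned long limit)
    {
            return (unsigned __int128)addr + size <=
                   (unsigned __int128)limit + 1;
    }

    int main(void)
    {
            unsigned long lim = (1UL << 48) - 1;    /* made-up inclusive limit */

            printf("%lu\n", range_ok_model(0, 16, lim));    /* 1: in range */
            printf("%lu\n", range_ok_model(lim, 1, lim));   /* 1: exactly the last byte */
            printf("%lu\n", range_ok_model(lim, 2, lim));   /* 0: one byte past the limit */
            printf("%lu\n", range_ok_model(~0UL, 2, ~0UL)); /* 0: sum wraps past 2^64 */
            return 0;
    }

Note the "+ 1" on the right-hand side of the comparison: the address limit is treated as inclusive, which is what lets the assembly fold the whole test into a single flag-setting carry chain.
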
@@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs)
  */
 #define untagged_addr(addr)		sign_extend64(addr, 55)
 
-#define access_ok(type, addr, size)	__range_ok(addr, size)
+#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
 #define user_addr_max			get_fs
 
 #define _ASM_EXTABLE(from, to)						\
@@ -220,6 +234,26 @@ static inline void uaccess_enable_not_uao(void)
 	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
 }
 
+/*
+ * Sanitise a uaccess pointer such that it becomes NULL if above the
+ * current addr_limit.
+ */
+#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
+static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+{
+	void __user *safe_ptr;
+
+	asm volatile(
+	"	bics	xzr, %1, %2\n"
+	"	csel	%0, %1, xzr, eq\n"
+	: "=&r" (safe_ptr)
+	: "r" (ptr), "r" (current_thread_info()->addr_limit)
+	: "cc");
+
+	csdb();
+	return safe_ptr;
+}
+
 /*
  * The "__xxx" versions of the user access functions do not verify the address
  * space - it must have been done previously with a separate "access_ok()"
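
The bics/csel pair is a branchless bounds check: "bics xzr, ptr, limit" sets the flags from ptr & ~limit, which is zero exactly when ptr has no bits set outside addr_limit (assuming the limit has all bits set below its top bit, as an inclusive limit of the form 2^n - 1 does), and csel then keeps the pointer or substitutes XZR, i.e. NULL. A rough C model of the selection, for illustration only:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative model, not the kernel code: keep ptr only if no
     * bit of it falls outside limit, i.e. (ptr & ~limit) == 0. */
    static void *mask_ptr_model(void *ptr, uintptr_t limit)
    {
            return ((uintptr_t)ptr & ~limit) == 0 ? ptr : NULL;
    }

The kernel does this in asm and follows it with csdb() because C offers no guarantee that the compiler keeps the selection branch-free, nor that the CPU cannot speculate through it; the barrier ensures the masked value is the one forwarded to dependent accesses.
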
@@ -272,28 +306,33 @@ do { \
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 } while (0)
 
-#define __get_user(x, ptr)						\
+#define __get_user_check(x, ptr, err)					\
 ({									\
-	int __gu_err = 0;						\
-	__get_user_err((x), (ptr), __gu_err);				\
-	__gu_err;							\
+	__typeof__(*(ptr)) __user *__p = (ptr);				\
+	might_fault();							\
+	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {		\
+		__p = uaccess_mask_ptr(__p);				\
+		__get_user_err((x), __p, (err));			\
+	} else {							\
+		(x) = 0; (err) = -EFAULT;				\
+	}								\
 })
 
 #define __get_user_error(x, ptr, err)					\
 ({									\
-	__get_user_err((x), (ptr), (err));				\
+	__get_user_check((x), (ptr), (err));				\
 	(void)0;							\
 })
 
-#define get_user(x, ptr)						\
+#define __get_user(x, ptr)						\
 ({									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	might_fault();							\
-	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
-		__get_user((x), __p) :					\
-		((x) = 0, -EFAULT);					\
+	int __gu_err = 0;						\
+	__get_user_check((x), (ptr), __gu_err);				\
+	__gu_err;							\
 })
 
+#define get_user	__get_user
+
 #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
 	asm volatile(							\
 	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
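
After this rework, get_user(), __get_user() and __get_user_error() all funnel through __get_user_check(), which performs the sequence: range check, pointer mask, then load. Callers are unchanged; a hypothetical example of the usual pattern (not from the patch):

    /* Hypothetical helper: the check-mask-load sequence happens
     * entirely inside get_user(). */
    static long example_get_flag(const int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))
                    return -EFAULT;
            return val;
    }

A side effect worth noting: __get_user() now runs the full access_ok() check as well, so call sites that relied on a separate, earlier access_ok() no longer leave a window where speculation could reach the load with an unchecked pointer.
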
@@ -336,43 +375,63 @@ do { \
 	uaccess_disable_not_uao();					\
 } while (0)
 
-#define __put_user(x, ptr)						\
+#define __put_user_check(x, ptr, err)					\
 ({									\
-	int __pu_err = 0;						\
-	__put_user_err((x), (ptr), __pu_err);				\
-	__pu_err;							\
+	__typeof__(*(ptr)) __user *__p = (ptr);				\
+	might_fault();							\
+	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {		\
+		__p = uaccess_mask_ptr(__p);				\
+		__put_user_err((x), __p, (err));			\
+	} else {							\
+		(err) = -EFAULT;					\
+	}								\
 })
 
 #define __put_user_error(x, ptr, err)					\
 ({									\
-	__put_user_err((x), (ptr), (err));				\
+	__put_user_check((x), (ptr), (err));				\
 	(void)0;							\
 })
 
-#define put_user(x, ptr)						\
+#define __put_user(x, ptr)						\
 ({									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	might_fault();							\
-	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
-		__put_user((x), __p) :					\
-		-EFAULT;						\
+	int __pu_err = 0;						\
+	__put_user_check((x), (ptr), __pu_err);				\
+	__pu_err;							\
 })
 
+#define put_user	__put_user
+
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
-#define raw_copy_from_user __arch_copy_from_user
+#define raw_copy_from_user(to, from, n)					\
+({									\
+	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
+})
+
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define raw_copy_to_user __arch_copy_to_user
-extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+#define raw_copy_to_user(to, from, n)					\
+({									\
+	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
+})
+
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_in_user(to, from, n)					\
+({									\
+	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
+			    __uaccess_mask_ptr(from), (n));		\
+})
+
 #define INLINE_COPY_TO_USER
 #define INLINE_COPY_FROM_USER
 
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user(to, n);
+		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 	return n;
 }
+#define clear_user	__clear_user
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
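
The copy and clear primitives follow the same recipe: the unchecked assembly routines are renamed to __arch_*, and the raw_* wrappers sanitise every __user pointer with __uaccess_mask_ptr() on the way in, so an out-of-range pointer reaches the copy loop as NULL rather than as an attacker-chosen address. Callers again need no changes; a hypothetical example:

    struct example_args {
            u64 addr;
            u32 len;
    };

    /* Hypothetical ioctl handler: copy_from_user() bottoms out in
     * raw_copy_from_user(), which masks the source pointer. */
    static long example_ioctl(void __user *argp)
    {
            struct example_args args;

            if (copy_from_user(&args, argp, sizeof(args)))
                    return -EFAULT;
            /* ... use args.addr, args.len ... */
            return 0;
    }
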
@@ -386,7 +445,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __
 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 {
 	kasan_check_write(dst, size);
-	return __copy_user_flushcache(dst, src, size);
+	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
 }
 #endif