/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;

        /*
         * Prevent a mispredicted conditional call to set_fs from forwarding
         * the wrong address limit to access_ok under speculation.
         */
        dsb(nsh);
        isb();

        /* On user-mode return, check fs is correct */
        set_thread_flag(TIF_FSCHECK);

        /*
         * Enable/disable UAO so that copy_to_user() etc can access
         * kernel memory with the unprivileged instructions.
         */
        if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
        else
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
                                CONFIG_ARM64_UAO));
}
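
/*
 * Illustrative sketch, not part of the original header: the traditional
 * pattern for temporarily widening the address limit so that uaccess-based
 * helpers can be pointed at kernel buffers. The helper name
 * do_something_with_uaccess() and its arguments are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_something_with_uaccess(kernel_buf, len);
 *	set_fs(old_fs);
 */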

#define segment_eq(a, b)        ((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
        unsigned long ret, limit = current_thread_info()->addr_limit;

        __chk_user_ptr(addr);
        asm volatile(
        // A + B <= C + 1 for all A,B,C, in four easy steps:
        // 1: X = A + B; X' = X % 2^64
        "       adds    %0, %3, %2\n"
        // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
        "       csel    %1, xzr, %1, hi\n"
        // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
        //    to compensate for the carry flag being set in step 4. For
        //    X > 2^64, X' merely has to remain nonzero, which it does.
        "       csinv   %0, %0, xzr, cc\n"
        // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
        //    comes from the carry in being clear. Otherwise, we are
        //    testing X' - C == 0, subject to the previous adjustments.
        "       sbcs    xzr, %0, %1\n"
        "       cset    %0, ls\n"
        : "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

        return ret;
}

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)             sign_extend64(addr, 55)

#define access_ok(type, addr, size)     __range_ok(addr, size)
#define user_addr_max                   get_fs
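
/*
 * Illustrative sketch, not part of the original header: validating a user
 * range with access_ok() before handing it to the unchecked helpers further
 * down. The function name example_check_range() is hypothetical.
 *
 *	static int example_check_range(const void __user *ubuf, size_t len)
 *	{
 *		if (!access_ok(VERIFY_READ, ubuf, len))
 *			return -EFAULT;
 *		// ubuf .. ubuf + len lies below addr_limit; it is now safe
 *		// to pass to the "__xxx" / __arch_* routines below.
 *		return 0;
 *	}
 */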

#define _ASM_EXTABLE(from, to) \
        "       .pushsection    __ex_table, \"a\"\n" \
        "       .align          3\n" \
        "       .long           (" #from " - .), (" #to " - .)\n" \
        "       .popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
        unsigned long flags, ttbr;

        local_irq_save(flags);
        ttbr = read_sysreg(ttbr1_el1);
        ttbr &= ~TTBR_ASID_MASK;
        /* reserved_ttbr0 placed before swapper_pg_dir */
        write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
        isb();
        /* Set reserved ASID */
        write_sysreg(ttbr, ttbr1_el1);
        isb();
        local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
        unsigned long flags, ttbr0, ttbr1;

        /*
         * Disable interrupts to avoid preemption between reading the 'ttbr0'
         * variable and the MSR. A context switch could trigger an ASID
         * roll-over and an update of 'ttbr0'.
         */
        local_irq_save(flags);
        ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

        /* Restore active ASID */
        ttbr1 = read_sysreg(ttbr1_el1);
        ttbr1 &= ~TTBR_ASID_MASK;       /* safety measure */
        ttbr1 |= ttbr0 & TTBR_ASID_MASK;
        write_sysreg(ttbr1, ttbr1_el1);
        isb();

        /* Restore user page table */
        write_sysreg(ttbr0, ttbr0_el1);
        isb();

        local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_disable();
        return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_enable();
        return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
        return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
        return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
                        CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
                        CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt) \
do { \
        if (!uaccess_ttbr0_disable()) \
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
                                CONFIG_ARM64_PAN)); \
} while (0)

#define __uaccess_enable(alt) \
do { \
        if (!uaccess_ttbr0_enable()) \
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
                                CONFIG_ARM64_PAN)); \
} while (0)

static inline void uaccess_disable(void)
{
        __uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
        __uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
        __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
        __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
        void __user *safe_ptr;

        asm volatile(
        "       bics    xzr, %1, %2\n"
        "       csel    %0, %1, xzr, eq\n"
        : "=&r" (safe_ptr)
        : "r" (ptr), "r" (current_thread_info()->addr_limit)
        : "cc");

        csdb();
        return safe_ptr;
}
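
/*
 * Illustrative note, not part of the original header: the address limit is
 * of the form 2^n - 1 (e.g. with a 48-bit user address space), so
 * "bics xzr, ptr, limit" tests whether the pointer has any bit set outside
 * the limit mask; the csel then forces such a pointer to NULL and csdb()
 * closes the speculation window. A hedged caller sketch, where
 * example_masked_copy() and its arguments are hypothetical:
 *
 *	static long example_masked_copy(void *dst, const void __user *src,
 *					unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_READ, src, len))
 *			return -EFAULT;
 *		return __arch_copy_from_user(dst, uaccess_mask_ptr(src), len);
 *	}
 */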

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
        asm volatile( \
        "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
                        alt_instr " " reg "1, [%2]\n", feature) \
        "2:\n" \
        "       .section .fixup, \"ax\"\n" \
        "       .align  2\n" \
        "3:     mov     %w0, %3\n" \
        "       mov     %1, #0\n" \
        "       b       2b\n" \
        "       .previous\n" \
        _ASM_EXTABLE(1b, 3b) \
        : "+r" (err), "=&r" (x) \
        : "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err) \
do { \
        unsigned long __gu_val; \
        __chk_user_ptr(ptr); \
        uaccess_enable_not_uao(); \
        switch (sizeof(*(ptr))) { \
        case 1: \
                __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 2: \
                __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 4: \
                __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 8: \
                __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        uaccess_disable_not_uao(); \
        (x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)

#define __get_user_check(x, ptr, err) \
({ \
        __typeof__(*(ptr)) __user *__p = (ptr); \
        might_fault(); \
        if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
                __p = uaccess_mask_ptr(__p); \
                __get_user_err((x), __p, (err)); \
        } else { \
                (x) = 0; (err) = -EFAULT; \
        } \
})

#define __get_user_error(x, ptr, err) \
({ \
        __get_user_check((x), (ptr), (err)); \
        (void)0; \
})

#define __get_user(x, ptr) \
({ \
        int __gu_err = 0; \
        __get_user_check((x), (ptr), __gu_err); \
        __gu_err; \
})

#define get_user        __get_user
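
/*
 * Illustrative sketch, not part of the original header: typical get_user()
 * usage in a syscall-style function. The name example_get_flags() and its
 * argument are hypothetical.
 *
 *	static long example_get_flags(unsigned int __user *uflags)
 *	{
 *		unsigned int flags;
 *
 *		if (get_user(flags, uflags))
 *			return -EFAULT;
 *		return flags;
 *	}
 */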

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
        asm volatile( \
        "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
                        alt_instr " " reg "1, [%2]\n", feature) \
        "2:\n" \
        "       .section .fixup,\"ax\"\n" \
        "       .align  2\n" \
        "3:     mov     %w0, %3\n" \
        "       b       2b\n" \
        "       .previous\n" \
        _ASM_EXTABLE(1b, 3b) \
        : "+r" (err) \
        : "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err) \
do { \
        __typeof__(*(ptr)) __pu_val = (x); \
        __chk_user_ptr(ptr); \
        uaccess_enable_not_uao(); \
        switch (sizeof(*(ptr))) { \
        case 1: \
                __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 2: \
                __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 4: \
                __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        case 8: \
                __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
                               (err), ARM64_HAS_UAO); \
                break; \
        default: \
                BUILD_BUG(); \
        } \
        uaccess_disable_not_uao(); \
} while (0)

#define __put_user_check(x, ptr, err) \
({ \
        __typeof__(*(ptr)) __user *__p = (ptr); \
        might_fault(); \
        if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
                __p = uaccess_mask_ptr(__p); \
                __put_user_err((x), __p, (err)); \
        } else { \
                (err) = -EFAULT; \
        } \
})

#define __put_user_error(x, ptr, err) \
({ \
        __put_user_check((x), (ptr), (err)); \
        (void)0; \
})

#define __put_user(x, ptr) \
({ \
        int __pu_err = 0; \
        __put_user_check((x), (ptr), __pu_err); \
        __pu_err; \
})

#define put_user        __put_user
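
/*
 * Illustrative sketch, not part of the original header: typical put_user()
 * usage, mirroring the get_user() example above. The name
 * example_put_result() and its arguments are hypothetical.
 *
 *	static long example_put_result(u32 value, u32 __user *uresult)
 *	{
 *		if (put_user(value, uresult))
 *			return -EFAULT;
 *		return 0;
 *	}
 */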

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n) \
({ \
        __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n) \
({ \
        __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n) \
({ \
        __arch_copy_in_user(__uaccess_mask_ptr(to), \
                            __uaccess_mask_ptr(from), (n)); \
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
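
/*
 * Illustrative sketch, not part of the original header: the generic
 * copy_from_user()/copy_to_user() wrappers in <linux/uaccess.h> are built on
 * the raw_copy_*() definitions above, and drivers normally call those
 * wrappers. The struct name example_req and function example_read_req()
 * are hypothetical.
 *
 *	struct example_req {
 *		u64 addr;
 *		u32 len;
 *		u32 flags;
 *	};
 *
 *	static int example_read_req(struct example_req *req,
 *				    const void __user *ubuf)
 *	{
 *		if (copy_from_user(req, ubuf, sizeof(*req)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */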

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __arch_clear_user(__uaccess_mask_ptr(to), n);
        return n;
}
#define clear_user      __clear_user
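
/*
 * Illustrative note, not part of the original header: like the copy helpers,
 * clear_user() returns the number of bytes that could NOT be cleared, so 0
 * means success. A hedged sketch (example_zero_user_buf() is hypothetical):
 *
 *	static int example_zero_user_buf(void __user *ubuf, unsigned long len)
 *	{
 *		if (clear_user(ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */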

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);

extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */