uaccess.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR ".word"
#define __UA_LA "la"
#define __UA_ADDU "addu"
#define __UA_t0 "$8"
#define __UA_t1 "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT __ua_limit
#define __UA_ADDR ".dword"
#define __UA_LA "dla"
#define __UA_ADDU "daddu"
#define __UA_t0 "$12"
#define __UA_t1 "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */
#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
#define USER_DS ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS ((mm_segment_t) { 0UL })
#define USER_DS ((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)
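/*
 * Example (illustrative sketch only, not part of this header): the usual
 * save/override/restore idiom built on get_fs()/set_fs().  Switching to
 * KERNEL_DS makes the access_ok() check below accept kernel addresses,
 * so a kernel buffer can be handed to code that expects a __user pointer.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	...call the user-access routines on a kernel buffer...
 *	set_fs(old_fs);
 */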
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size) \
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
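/*
 * Worked example for __ua_size(): with a constant, positive size such as
 * sizeof(int), __builtin_constant_p(size) is true and the macro folds to
 * 0 at compile time, so __access_ok() below only has to test the address
 * bits.  With a runtime size the macro yields the size itself, and a
 * bogus "negative" size has its sign bit set, which the signed mask test
 * below then catches.
 */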
/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *	%VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask) \
({ \
	unsigned long __addr = (unsigned long) (addr); \
	unsigned long __size = size; \
	unsigned long __mask = mask; \
	unsigned long __ok; \
\
	__chk_user_ptr(addr); \
	__ok = (signed long)(__mask & (__addr | (__addr + __size) | \
		__ua_size(__size))); \
	__ok == 0; \
})

#define access_ok(type, addr, size) \
	likely(__access_ok((addr), (size), __access_mask))
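/*
 * Example (sketch): check a user pointer once with access_ok(), then use
 * the unchecked accessors defined further down.  fetch_user_flag() is a
 * hypothetical helper, not part of this header.
 *
 *	static int fetch_user_flag(const int __user *uptr, int *flag)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		return __get_user(*flag, uptr);
 *	}
 */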
/*
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
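/*
 * Example (sketch): the checked accessors need no access_ok() call of
 * their own; they return 0 on success or -EFAULT.  double_user_int() is
 * a hypothetical helper, not part of this header.
 *
 *	static int double_user_int(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val * 2, uptr);
 *	}
 */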
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
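/*
 * Example (sketch): one access_ok() check amortized over several
 * unchecked accesses to the same user structure.  struct upair and
 * swap_user_pair() are hypothetical.
 *
 *	struct upair { int a, b; };
 *
 *	static int swap_user_pair(struct upair __user *p)
 *	{
 *		int a, b, err = 0;
 *
 *		if (!access_ok(VERIFY_WRITE, p, sizeof(*p)))
 *			return -EFAULT;
 *		err |= __get_user(a, &p->a);
 *		err |= __get_user(b, &p->b);
 *		err |= __put_user(b, &p->a);
 *		err |= __put_user(a, &p->b);
 *		return err ? -EFAULT : 0;
 *	}
 */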
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, insn, ptr) __get_user_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, insn, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_asm(val, "lb", ptr); break; \
	case 2: __get_user_asm(val, "lh", ptr); break; \
	case 4: __get_user_asm(val, "lw", ptr); break; \
	case 8: __GET_USER_DW(val, "lw", ptr); break; \
	default: __get_user_unknown(); break; \
	} \
} while (0)

#define __get_user_nocheck(x, ptr, size) \
({ \
	int __gu_err; \
\
	__chk_user_ptr(ptr); \
	__get_user_common((x), size, ptr); \
	__gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
	might_fault(); \
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_common((x), size, __gu_ptr); \
\
	__gu_err; \
})

#define __get_user_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
\
	__asm__ __volatile__( \
	"1: " insn " %1, %3 \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR "\t1b, 3b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}
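/*
 * How the fixup above works: the address of the load at label 1 and the
 * address of the fixup code at label 3 are recorded as a pair in the
 * __ex_table section.  If the load faults, the fault handler finds the
 * faulting PC in that table (see fixup_exception() at the end of this
 * file) and resumes at label 3, which loads -EFAULT into the error
 * output and jumps back past the load.  On success %0 keeps the 0 it
 * was preloaded with through the "0" (0) input constraint.
 */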
/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32(val, insn, addr) \
{ \
	union { \
		unsigned long long l; \
		__typeof__(*(addr)) t; \
	} __gu_tmp; \
\
	__asm__ __volatile__( \
	"1: " insn " %1, (%3) \n" \
	"2: " insn " %D1, 4(%3) \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" move %1, $0 \n" \
	" move %D1, $0 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp.l) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
\
	(val) = __gu_tmp.t; \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(insn, ptr) __put_user_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(insn, ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_common(ptr, size) \
do { \
	switch (size) { \
	case 1: __put_user_asm("sb", ptr); break; \
	case 2: __put_user_asm("sh", ptr); break; \
	case 4: __put_user_asm("sw", ptr); break; \
	case 8: __PUT_USER_DW("sw", ptr); break; \
	default: __put_user_unknown(); break; \
	} \
} while (0)

#define __put_user_nocheck(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
\
	__chk_user_ptr(ptr); \
	__pu_val = (x); \
	__put_user_common(ptr, size); \
	__pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
\
	might_fault(); \
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
		__put_user_common(__pu_addr, size); \
\
	__pu_err; \
})
#define __put_user_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: " insn " %z2, %3 # __put_user_asm \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 3b \n" \
	" .previous \n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}

#define __put_user_asm_ll32(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: " insn " %2, (%3) # __put_user_asm_ll32 \n" \
	"2: " insn " %D2, 4(%3) \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" .previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}
extern void __put_user_unknown(void);

/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr) \
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_asm(val, "lb", ptr); break; \
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
	default: __get_user_unaligned_unknown(); break; \
	} \
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size) \
({ \
	int __gu_err; \
\
	__get_user_unaligned_common((x), size, ptr); \
	__gu_err; \
})

#define __get_user_unaligned_check(x,ptr,size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_unaligned_common((x), size, __gu_ptr); \
\
	__gu_err; \
})

#define __get_user_unaligned_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
\
	__asm__ __volatile__( \
	"1: " insn " %1, %3 \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR "\t1b, 3b \n" \
	" " __UA_ADDR "\t1b + 4, 3b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
	unsigned long long __gu_tmp; \
\
	__asm__ __volatile__( \
	"1: ulw %1, (%3) \n" \
	"2: ulw %D1, 4(%3) \n" \
	" move %0, $0 \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" move %1, $0 \n" \
	" move %D1, $0 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 1b + 4, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" " __UA_ADDR " 2b + 4, 4b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif
#define __put_user_unaligned_common(ptr, size) \
do { \
	switch (size) { \
	case 1: __put_user_asm("sb", ptr); break; \
	case 2: __put_user_unaligned_asm("ush", ptr); break; \
	case 4: __put_user_unaligned_asm("usw", ptr); break; \
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
	default: __put_user_unaligned_unknown(); break; \
	} \
} while (0)
#define __put_user_unaligned_nocheck(x,ptr,size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
\
	__pu_val = (x); \
	__put_user_unaligned_common(ptr, size); \
	__pu_err; \
})

#define __put_user_unaligned_check(x,ptr,size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
		__put_user_unaligned_common(__pu_addr, size); \
\
	__pu_err; \
})

#define __put_user_unaligned_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: " insn " %z2, %3 # __put_user_unaligned_asm \n" \
	"2: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 3b \n" \
	" .previous \n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}

#define __put_user_unaligned_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
	"2: sw %D2, 4(%3) \n" \
	"3: \n" \
	" .insn \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 1b + 4, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" " __UA_ADDR " 2b + 4, 4b \n" \
	" .previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}

extern void __put_user_unaligned_unknown(void);
/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	__UA_LA "\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif
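/*
 * For reference, __MODULE_JAL(__copy_user) expands roughly as follows.
 * Built into the kernel, where the target is reachable by a jal:
 *
 *	jal	__copy_user
 *
 * In a module, which may be loaded outside the 256 MB segment a jal can
 * reach, an absolute address is loaded into $1 (at) and jumped through:
 *
 *	.set	noat
 *	la	$1, __copy_user		(dla on 64-bit kernels)
 *	jalr	$1
 *	.set	at
 */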
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n) \
({ \
	register void __user *__cu_to_r __asm__("$4"); \
	register const void *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(__copy_user) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})

#define __copy_from_user_inatomic(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
						    __cu_len); \
	__cu_len; \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
		might_fault(); \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
						 __cu_len); \
	} \
	__cu_len; \
})
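/*
 * Example (sketch): a read()-style handler returning data to user space.
 * demo_read(), dev_buf and dev_len are hypothetical driver state, shown
 * only to illustrate the calling convention.
 *
 *	static ssize_t demo_read(char __user *buf, size_t count)
 *	{
 *		size_t n = min(count, dev_len);
 *
 *		if (copy_to_user(buf, dev_buf, n))
 *			return -EFAULT;
 *		return n;
 *	}
 */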
#define __invoke_copy_from_user(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user_inatomic) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
					   __cu_len); \
	__cu_len; \
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
		might_fault(); \
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
						   __cu_len); \
	} \
	__cu_len; \
})
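/*
 * Example (sketch): pulling a whole structure in from user space, as an
 * ioctl handler typically does.  struct demo_args and uarg are
 * hypothetical.  The return value is the number of bytes left uncopied,
 * so any nonzero result is treated as a fault.
 *
 *	struct demo_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 */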
#define __copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
					   __cu_len); \
	__cu_len; \
})

#define copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
		might_fault(); \
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
						   __cu_len); \
	} \
	__cu_len; \
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n) \
({ \
	void __user * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && access_ok(VERIFY_WRITE, \
				   __cl_addr, __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; \
})
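/*
 * Example (sketch): zeroing the tail of a user buffer after a short
 * copy, so no stale destination bytes are left behind.  buf, copied and
 * count are hypothetical locals.
 *
 *	if (copied < count && clear_user(buf + copied, count - copied))
 *		return -EFAULT;
 */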
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst: Destination address, in kernel space.  This buffer must be at
 *	 least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst: Destination address, in kernel space.  This buffer must be at
 *	 least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
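/*
 * Example (sketch): fetching a user-supplied name into a fixed kernel
 * buffer.  uname is a hypothetical user pointer.  A result equal to the
 * buffer size means the source string was truncated and may not be NUL
 * terminated, per the return-value rules above.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */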
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 * @n: The maximum valid length.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
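/*
 * Example (sketch): bounding a user string before copying or allocating
 * for it; PATH_MAX stands in for whatever limit applies, and ustr is a
 * hypothetical user pointer.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (len == 0)
 *		return -EFAULT;		(fault while scanning)
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;	(no NUL found within the limit)
 *	...on success, len includes the terminating NUL...
 */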
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);
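/*
 * Sketch of how fixup_exception() consumes the table above (the MIPS
 * implementation lives in arch/mips/mm/extable.c; this is a simplified
 * rendition, not a verbatim copy): look the faulting PC up among the
 * recorded insn addresses and, on a hit, resume at the paired fixup.
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *fixup;
 *
 *		fixup = search_exception_tables(exception_epc(regs));
 *		if (fixup) {
 *			regs->cp0_epc = fixup->nextinsn;
 *			return 1;
 *		}
 *		return 0;
 *	}
 */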
#endif /* _ASM_UACCESS_H */