/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
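
/*
 * Illustrative sketch (not part of this header): conceptually, the fault
 * handler scans the exception table for the faulting PC and, if it finds
 * a match, resumes execution at the fixup address.  The generic helper
 * search_exception_tables() exists in kernel/extable.c; the surrounding
 * code here is a simplified assumption, not the actual ARM fault path:
 *
 *      const struct exception_table_entry *e;
 *
 *      e = search_exception_tables(instruction_pointer(regs));
 *      if (e) {
 *              regs->ARM_pc = e->fixup;        // continue at fixup code
 *              return 1;                       // fault was handled
 *      }
 *      return 0;                               // genuine fault
 */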

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS       0x00000000
#define get_ds()        (KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS         TASK_SIZE
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
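
/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * letting kernel code hand kernel pointers to routines that expect
 * __user pointers is to widen the address limit around the call and
 * always restore the old limit afterwards:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ... call a user-access routine on a kernel buffer ...
 *      set_fs(old_fs);
 */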

#define segment_eq(a,b) ((a) == (b))

#define __addr_ok(addr) ({ \
        unsigned long flag; \
        __asm__("cmp %2, %0; movlo %0, #0" \
                : "=&r" (flag) \
                : "0" (current_thread_info()->addr_limit), "r" (addr) \
                : "cc"); \
        (flag == 0); })

/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
        unsigned long flag, roksum; \
        __chk_user_ptr(addr); \
        __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
                : "=&r" (flag), "=&r" (roksum) \
                : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
                : "cc"); \
        flag; })
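
/*
 * Illustrative sketch (not part of this header), assuming a nonzero
 * addr_limit such as USER_DS: the assembly above is roughly
 *
 *      unsigned long long sum = (unsigned long long)addr + size;
 *
 *      flag = (sum <= addr_limit) ? 0 : addr_limit;
 *
 * i.e. flag == 0 iff [addr, addr + size) lies wholly below the limit,
 * with the addition done in more than 32 bits so it cannot wrap.  A
 * KERNEL_DS limit of 0 stands for 0x1,0000,0000 and accepts everything.
 */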

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);

#define __GUP_CLOBBER_1 "lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2 "ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4 "lr", "cc"

#define __get_user_x(__r2,__p,__e,__l,__s) \
        __asm__ __volatile__ ( \
                __asmeq("%0", "r0") __asmeq("%1", "r2") \
                __asmeq("%3", "r1") \
                "bl __get_user_" #__s \
                : "=&r" (__e), "=r" (__r2) \
                : "0" (__p), "r" (__l) \
                : __GUP_CLOBBER_##__s)

#define __get_user_check(x,p) \
        ({ \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
                register unsigned long __r2 asm("r2"); \
                register unsigned long __l asm("r1") = __limit; \
                register int __e asm("r0"); \
                switch (sizeof(*(__p))) { \
                case 1: \
                        __get_user_x(__r2, __p, __e, __l, 1); \
                        break; \
                case 2: \
                        __get_user_x(__r2, __p, __e, __l, 2); \
                        break; \
                case 4: \
                        __get_user_x(__r2, __p, __e, __l, 4); \
                        break; \
                default: __e = __get_user_bad(); break; \
                } \
                x = (typeof(*(p))) __r2; \
                __e; \
        })

#define get_user(x,p) \
        ({ \
                might_fault(); \
                __get_user_check(x,p); \
        })
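
/*
 * Illustrative sketch (not part of this header): a typical ioctl handler
 * fetches a single value from user space and propagates -EFAULT; the
 * handler name and argument layout are hypothetical:
 *
 *      static long foo_ioctl(struct file *f, unsigned int cmd,
 *                            unsigned long arg)
 *      {
 *              int val;
 *
 *              if (get_user(val, (int __user *)arg))
 *                      return -EFAULT;
 *              ...
 *      }
 */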

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_x(__r2,__p,__e,__l,__s) \
        __asm__ __volatile__ ( \
                __asmeq("%0", "r0") __asmeq("%2", "r2") \
                __asmeq("%3", "r1") \
                "bl __put_user_" #__s \
                : "=&r" (__e) \
                : "0" (__p), "r" (__r2), "r" (__l) \
                : "ip", "lr", "cc")

#define __put_user_check(x,p) \
        ({ \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                const typeof(*(p)) __user *__tmp_p = (p); \
                register const typeof(*(p)) __r2 asm("r2") = (x); \
                register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
                register unsigned long __l asm("r1") = __limit; \
                register int __e asm("r0"); \
                switch (sizeof(*(__p))) { \
                case 1: \
                        __put_user_x(__r2, __p, __e, __l, 1); \
                        break; \
                case 2: \
                        __put_user_x(__r2, __p, __e, __l, 2); \
                        break; \
                case 4: \
                        __put_user_x(__r2, __p, __e, __l, 4); \
                        break; \
                case 8: \
                        __put_user_x(__r2, __p, __e, __l, 8); \
                        break; \
                default: __e = __put_user_bad(); break; \
                } \
                __e; \
        })

#define put_user(x,p) \
        ({ \
                might_fault(); \
                __put_user_check(x,p); \
        })
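
/*
 * Illustrative sketch (not part of this header): the mirror image of
 * the get_user() example above, writing one value back to user space:
 *
 *      if (put_user(val, (int __user *)arg))
 *              return -EFAULT;
 */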

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS                 KERNEL_DS

#define segment_eq(a,b)         (1)
#define __addr_ok(addr)         ((void)(addr),1)
#define __range_ok(addr,size)   ((void)(addr),0)
#define get_fs()                (KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x,p)   __get_user(x,p)
#define put_user(x,p)   __put_user(x,p)

#endif /* CONFIG_MMU */

#define access_ok(type,addr,size)       (__range_ok(addr,size) == 0)

#define user_addr_max() \
        (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
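
/*
 * Illustrative sketch (not part of this header): code that uses the
 * unchecked __get_user()/__put_user() helpers below must validate the
 * whole range once up front:
 *
 *      if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      if (__get_user(val, uptr))      // fixup still runs on a fault,
 *              return -EFAULT;         // but no range check is done
 */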

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x,ptr) \
        ({ \
                long __gu_err = 0; \
                __get_user_err((x),(ptr),__gu_err); \
                __gu_err; \
        })

#define __get_user_error(x,ptr,err) \
        ({ \
                __get_user_err((x),(ptr),err); \
                (void) 0; \
        })
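
/*
 * Illustrative sketch (not part of this header): __get_user_error() is
 * convenient for pulling many fields while accumulating a single error
 * code, e.g. when restoring a register frame; the frame layout here is
 * hypothetical:
 *
 *      int err = 0;
 *
 *      __get_user_error(regs->ARM_r0, &frame->r0, err);
 *      __get_user_error(regs->ARM_r1, &frame->r1, err);
 *      if (err)
 *              return -EFAULT;
 */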

#define __get_user_err(x,ptr,err) \
do { \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        unsigned long __gu_val; \
        __chk_user_ptr(ptr); \
        might_fault(); \
        switch (sizeof(*(ptr))) { \
        case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
        case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
        case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \
        default: (__gu_val) = __get_user_bad(); \
        } \
        (x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)

#define __get_user_asm_byte(x,addr,err) \
        __asm__ __volatile__( \
        "1: " TUSER(ldrb) " %1,[%2],#0\n" \
        "2:\n" \
        "       .pushsection .fixup,\"ax\"\n" \
        "       .align  2\n" \
        "3:     mov     %0, %3\n" \
        "       mov     %1, #0\n" \
        "       b       2b\n" \
        "       .popsection\n" \
        "       .pushsection __ex_table,\"a\"\n" \
        "       .align  3\n" \
        "       .long   1b, 3b\n" \
        "       .popsection" \
        : "+r" (err), "=&r" (x) \
        : "r" (addr), "i" (-EFAULT) \
        : "cc")

#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
        unsigned long __b1, __b2; \
        __get_user_asm_byte(__b1, __gu_addr, err); \
        __get_user_asm_byte(__b2, __gu_addr + 1, err); \
        (x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
        unsigned long __b1, __b2; \
        __get_user_asm_byte(__b1, __gu_addr, err); \
        __get_user_asm_byte(__b2, __gu_addr + 1, err); \
        (x) = (__b1 << 8) | __b2; \
})
#endif

#define __get_user_asm_word(x,addr,err) \
        __asm__ __volatile__( \
        "1: " TUSER(ldr) " %1,[%2],#0\n" \
        "2:\n" \
        "       .pushsection .fixup,\"ax\"\n" \
        "       .align  2\n" \
        "3:     mov     %0, %3\n" \
        "       mov     %1, #0\n" \
        "       b       2b\n" \
        "       .popsection\n" \
        "       .pushsection __ex_table,\"a\"\n" \
        "       .align  3\n" \
        "       .long   1b, 3b\n" \
        "       .popsection" \
        : "+r" (err), "=&r" (x) \
        : "r" (addr), "i" (-EFAULT) \
        : "cc")

#define __put_user(x,ptr) \
        ({ \
                long __pu_err = 0; \
                __put_user_err((x),(ptr),__pu_err); \
                __pu_err; \
        })

#define __put_user_error(x,ptr,err) \
        ({ \
                __put_user_err((x),(ptr),err); \
                (void) 0; \
        })

#define __put_user_err(x,ptr,err) \
do { \
        unsigned long __pu_addr = (unsigned long)(ptr); \
        __typeof__(*(ptr)) __pu_val = (x); \
        __chk_user_ptr(ptr); \
        might_fault(); \
        switch (sizeof(*(ptr))) { \
        case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
        case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
        case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \
        case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \
        default: __put_user_bad(); \
        } \
} while (0)

#define __put_user_asm_byte(x,__pu_addr,err) \
        __asm__ __volatile__( \
        "1: " TUSER(strb) " %1,[%2],#0\n" \
        "2:\n" \
        "       .pushsection .fixup,\"ax\"\n" \
        "       .align  2\n" \
        "3:     mov     %0, %3\n" \
        "       b       2b\n" \
        "       .popsection\n" \
        "       .pushsection __ex_table,\"a\"\n" \
        "       .align  3\n" \
        "       .long   1b, 3b\n" \
        "       .popsection" \
        : "+r" (err) \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
        : "cc")

#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
        unsigned long __temp = (unsigned long)(x); \
        __put_user_asm_byte(__temp, __pu_addr, err); \
        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
        unsigned long __temp = (unsigned long)(x); \
        __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
        __put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif

#define __put_user_asm_word(x,__pu_addr,err) \
        __asm__ __volatile__( \
        "1: " TUSER(str) " %1,[%2],#0\n" \
        "2:\n" \
        "       .pushsection .fixup,\"ax\"\n" \
        "       .align  2\n" \
        "3:     mov     %0, %3\n" \
        "       b       2b\n" \
        "       .popsection\n" \
        "       .pushsection __ex_table,\"a\"\n" \
        "       .align  3\n" \
        "       .long   1b, 3b\n" \
        "       .popsection" \
        : "+r" (err) \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
        : "cc")

#ifndef __ARMEB__
#define __reg_oper0     "%R2"
#define __reg_oper1     "%Q2"
#else
#define __reg_oper0     "%Q2"
#define __reg_oper1     "%R2"
#endif

#define __put_user_asm_dword(x,__pu_addr,err) \
        __asm__ __volatile__( \
 ARM(   "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
 ARM(   "2: " TUSER(str) " " __reg_oper0 ", [%1]\n"     ) \
 THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n"     ) \
 THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
        "3:\n" \
        "       .pushsection .fixup,\"ax\"\n" \
        "       .align  2\n" \
        "4:     mov     %0, %3\n" \
        "       b       3b\n" \
        "       .popsection\n" \
        "       .pushsection __ex_table,\"a\"\n" \
        "       .align  3\n" \
        "       .long   1b, 4b\n" \
        "       .long   2b, 4b\n" \
        "       .popsection" \
        : "+r" (err), "+r" (__pu_addr) \
        : "r" (x), "i" (-EFAULT) \
        : "cc")

#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
#else
#define __copy_from_user(to,from,n)     (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to,from,n)       (memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr,n)            (memset((void __force *)addr, 0, n), 0)
#endif

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else /* security hole - plug it */
                memset(to, 0, n);
        return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}
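
/*
 * Illustrative sketch (not part of this header): both copy routines
 * return the number of bytes that could NOT be copied, so callers treat
 * any nonzero result as a fault; 'struct foo' is hypothetical:
 *
 *      struct foo karg;
 *
 *      if (copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 */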

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __clear_user(to, n);
        return n;
}
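
/*
 * Illustrative sketch (not part of this header): clear_user() follows
 * the same convention, returning the number of bytes left unzeroed:
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */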

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
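
/*
 * Illustrative sketch (not part of this header): strncpy_from_user()
 * returns the string length on success, -EFAULT on a bad address, and
 * 'count' when no NUL terminator was found within the buffer; how to
 * treat truncation is the caller's policy:
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;
 */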

#endif /* _ASMARM_UACCESS_H */