/*
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *	    Hans-Peter Nilsson (hp@axis.com)
 */

/* Asm statements have been tweaked (within the domain of correctness) to
   give satisfactory results for "gcc version 2.96 20000427 (experimental)".
   Check regularly...

   Register $r9 is chosen for temporaries, being a call-clobbered register
   first in line to be used (notably for local blocks), not colliding with
   parameter registers.  */

#ifndef _CRIS_UACCESS_H
#define _CRIS_UACCESS_H

#include <asm/processor.h>
#include <asm/page.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

/* addr_limit is the maximum accessible address for the task.  We misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 * (see above)
 */

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
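
/*
 * A minimal usage sketch (illustrative, not part of this header): the
 * classic pattern for temporarily raising addr_limit so that a kernel
 * pointer passes the __user_ok() check:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	...			(user-access calls on a kernel buffer)
 *	set_fs(old_fs);
 */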

#include <arch/uaccess.h>

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};
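
/*
 * An illustrative sketch (not the actual asm, which lives in
 * <arch/uaccess.h>) of how such a pair is emitted: the possibly-faulting
 * instruction gets a local label, the recovery code goes in a .fixup
 * section, and the (insn, fixup) address pair lands in __ex_table for the
 * fault handler to find ($src and $err are placeholders):
 *
 *	1:	move.d [$src],$r9	; instruction allowed to fault
 *	2:	...			; normal path continues here
 *		.section .fixup,"ax"
 *	3:	move.d -EFAULT,$err	; fixup: record the error
 *		jump 2b
 *		.previous
 *		.section __ex_table,"a"
 *		.dword 1b,3b		; pair: faulting insn, its fixup
 *		.previous
 */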

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * CRIS, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */

#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
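
/*
 * A usage sketch (illustrative; "uptr" is a hypothetical int __user
 * pointer): both macros return 0 on success and -EFAULT on failure,
 * with the value itself transferred through the first argument:
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */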

extern long __put_user_bad(void);

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		__put_user_asm(x, ptr, retval, "move.b");	\
		break;						\
	case 2:							\
		__put_user_asm(x, ptr, retval, "move.w");	\
		break;						\
	case 4:							\
		__put_user_asm(x, ptr, retval, "move.d");	\
		break;						\
	case 8:							\
		__put_user_asm_64(x, ptr, retval);		\
		break;						\
	default:						\
		__put_user_bad();				\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		__get_user_asm(x, ptr, retval, "move.b");	\
		break;						\
	case 2:							\
		__get_user_asm(x, ptr, retval, "move.w");	\
		break;						\
	case 4:							\
		__get_user_asm(x, ptr, retval, "move.d");	\
		break;						\
	case 8:							\
		__get_user_asm_64(x, ptr, retval);		\
		break;						\
	default:						\
		(x) = __get_user_bad();				\
	}							\
} while (0)
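
/*
 * The size argument is always sizeof(*(ptr)), a compile-time constant, so
 * each switch above collapses to the single move of the right width (e.g.
 * put_user() through a u16 pointer becomes the "move.w" case).  The
 * default cases call deliberately undefined functions, turning an
 * unsupported size into a link-time error.
 */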

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err);		\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ, __gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

extern long __get_user_bad(void);

/* More complex functions.  Most are inline, but some call functions that
   live in lib/usercopy.c  */

extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);

static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	return __do_strncpy_from_user(dst, src, count);
}

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;

	if (access_ok(VERIFY_READ, src, 1))
		res = __do_strncpy_from_user(dst, src, count);
	return res;
}
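
/*
 * A usage sketch (illustrative; "uname" is just a hypothetical user
 * pointer): on success strncpy_from_user() returns the length of the
 * copied string, excluding the terminating NUL; on an inaccessible
 * source it returns -EFAULT:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 */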

/* Note that these expand awfully if made into switch constructs, so
   don't do that.  */

static inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_copy_from_user_1(to, from, ret);
	else if (n == 2)
		__asm_copy_from_user_2(to, from, ret);
	else if (n == 3)
		__asm_copy_from_user_3(to, from, ret);
	else if (n == 4)
		__asm_copy_from_user_4(to, from, ret);
	else if (n == 5)
		__asm_copy_from_user_5(to, from, ret);
	else if (n == 6)
		__asm_copy_from_user_6(to, from, ret);
	else if (n == 7)
		__asm_copy_from_user_7(to, from, ret);
	else if (n == 8)
		__asm_copy_from_user_8(to, from, ret);
	else if (n == 9)
		__asm_copy_from_user_9(to, from, ret);
	else if (n == 10)
		__asm_copy_from_user_10(to, from, ret);
	else if (n == 11)
		__asm_copy_from_user_11(to, from, ret);
	else if (n == 12)
		__asm_copy_from_user_12(to, from, ret);
	else if (n == 13)
		__asm_copy_from_user_13(to, from, ret);
	else if (n == 14)
		__asm_copy_from_user_14(to, from, ret);
	else if (n == 15)
		__asm_copy_from_user_15(to, from, ret);
	else if (n == 16)
		__asm_copy_from_user_16(to, from, ret);
	else if (n == 20)
		__asm_copy_from_user_20(to, from, ret);
	else if (n == 24)
		__asm_copy_from_user_24(to, from, ret);
	else
		ret = __copy_user_zeroing(to, from, n);
	return ret;
}

/* Ditto, don't make a switch out of this.  */

static inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_copy_to_user_1(to, from, ret);
	else if (n == 2)
		__asm_copy_to_user_2(to, from, ret);
	else if (n == 3)
		__asm_copy_to_user_3(to, from, ret);
	else if (n == 4)
		__asm_copy_to_user_4(to, from, ret);
	else if (n == 5)
		__asm_copy_to_user_5(to, from, ret);
	else if (n == 6)
		__asm_copy_to_user_6(to, from, ret);
	else if (n == 7)
		__asm_copy_to_user_7(to, from, ret);
	else if (n == 8)
		__asm_copy_to_user_8(to, from, ret);
	else if (n == 9)
		__asm_copy_to_user_9(to, from, ret);
	else if (n == 10)
		__asm_copy_to_user_10(to, from, ret);
	else if (n == 11)
		__asm_copy_to_user_11(to, from, ret);
	else if (n == 12)
		__asm_copy_to_user_12(to, from, ret);
	else if (n == 13)
		__asm_copy_to_user_13(to, from, ret);
	else if (n == 14)
		__asm_copy_to_user_14(to, from, ret);
	else if (n == 15)
		__asm_copy_to_user_15(to, from, ret);
	else if (n == 16)
		__asm_copy_to_user_16(to, from, ret);
	else if (n == 20)
		__asm_copy_to_user_20(to, from, ret);
	else if (n == 24)
		__asm_copy_to_user_24(to, from, ret);
	else
		ret = __copy_user(to, from, n);
	return ret;
}

/* No switch, please.  */

static inline unsigned long
__constant_clear_user(void __user *to, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_clear_1(to, ret);
	else if (n == 2)
		__asm_clear_2(to, ret);
	else if (n == 3)
		__asm_clear_3(to, ret);
	else if (n == 4)
		__asm_clear_4(to, ret);
	else if (n == 8)
		__asm_clear_8(to, ret);
	else if (n == 12)
		__asm_clear_12(to, ret);
	else if (n == 16)
		__asm_clear_16(to, ret);
	else if (n == 20)
		__asm_clear_20(to, ret);
	else if (n == 24)
		__asm_clear_24(to, ret);
	else
		ret = __do_clear_user(to, n);
	return ret;
}

static inline size_t clear_user(void __user *to, size_t n)
{
	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
		return n;
	if (__builtin_constant_p(n))
		return __constant_clear_user(to, n);
	else
		return __do_clear_user(to, n);
}

static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
{
	if (unlikely(!access_ok(VERIFY_READ, from, n))) {
		memset(to, 0, n);
		return n;
	}
	if (__builtin_constant_p(n))
		return __constant_copy_from_user(to, from, n);
	else
		return __copy_user_zeroing(to, from, n);
}

static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
{
	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
		return n;
	if (__builtin_constant_p(n))
		return __constant_copy_to_user(to, from, n);
	else
		return __copy_user(to, from, n);
}
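
/*
 * A usage sketch (illustrative; "struct foo" and "ubuf" are hypothetical):
 * both copy routines return the number of bytes that could NOT be copied,
 * so zero means success:
 *
 *	struct foo kbuf;
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */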

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */

static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void __user *from,
				 unsigned long n)
{
	return __copy_user_zeroing(to, from, n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void __user *to, const void *from,
			       unsigned long n)
{
	return __copy_user(to, from, n);
}

static inline unsigned long
__generic_clear_user_nocheck(void __user *to, unsigned long n)
{
	return __do_clear_user(to, n);
}

/* without checking */

#define __copy_to_user(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))

#define strlen_user(str) strnlen_user((str), 0x7ffffffe)

#endif	/* _CRIS_UACCESS_H */