  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
  3. #ifndef __ASM_CSKY_UACCESS_H
  4. #define __ASM_CSKY_UACCESS_H
  5. /*
  6. * User space memory access functions
  7. */
  8. #include <linux/compiler.h>
  9. #include <linux/errno.h>
  10. #include <linux/types.h>
  11. #include <linux/sched.h>
  12. #include <linux/mm.h>
  13. #include <linux/string.h>
  14. #include <linux/version.h>
  15. #include <asm/segment.h>
  16. #define VERIFY_READ 0
  17. #define VERIFY_WRITE 1
  18. static inline int access_ok(int type, const void *addr, unsigned long size)
  19. {
  20. unsigned long limit = current_thread_info()->addr_limit.seg;
  21. return (((unsigned long)addr < limit) &&
  22. ((unsigned long)(addr + size) < limit));
  23. }
  24. static inline int verify_area(int type, const void *addr, unsigned long size)
  25. {
  26. return access_ok(type, addr, size) ? 0 : -EFAULT;
  27. }
/* Non-zero when @addr itself lies below the current addr_limit. */
#define __addr_ok(addr) (access_ok(VERIFY_READ, addr, 0))

/* Deliberately undefined: referencing it for an unsupported put_user
 * size turns into a link-time error. */
extern int __put_user_bad(void);
  30. /*
  31. * Tell gcc we read from memory instead of writing: this is because
  32. * we do not write to any memory gcc knows about, so there are no
  33. * aliasing issues.
  34. */
  35. /*
  36. * These are the main single-value transfer routines. They automatically
  37. * use the right size if we just have the right pointer type.
  38. *
  39. * This gets kind of ugly. We want to return _two_ values in "get_user()"
  40. * and yet we don't want to do any pointers, because that is too much
  41. * of a performance impact. Thus we have a few rather ugly macros here,
  42. * and hide all the ugliness from the user.
  43. *
  44. * The "__xxx" versions of the user access functions are versions that
  45. * do not verify the address space, that must have been done previously
  46. * with a separate "access_ok()" call (this is used when we do multiple
  47. * accesses to the same area of user memory).
  48. *
  49. * As we use the same address space for kernel and user data on
  50. * Ckcore, we can just do these as direct assignments. (Of course, the
  51. * exception handling means that it's no longer "just"...)
  52. */
/*
 * Single-value user accessors.  put_user/get_user perform an
 * access_ok() range check; the __put_user/__get_user variants skip it
 * (the caller must have called access_ok() already).  The access size
 * is derived from the pointer's pointee type via sizeof.
 */
#define put_user(x, ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

#define __put_user(x, ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/* View an arbitrary address as a pointer to unsigned long. */
#define __ptr(x) ((unsigned long *)(x))

#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/*
 * Store @x to user address @ptr without an access_ok() check.
 * Evaluates to 0 on success or -EFAULT if the store faults.
 * Note: a NULL @ptr is skipped silently and still reports 0.
 */
#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err = 0;					\
	typeof(*(ptr)) *__pu_addr = (ptr);			\
	typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x);		\
	if (__pu_addr)						\
		__put_user_size(__pu_val, (__pu_addr), (size),	\
			__pu_err);				\
	__pu_err;						\
})
/*
 * Store @x to user address @ptr after an access_ok() range check.
 * Evaluates to 0 on success; -EFAULT when the range check fails,
 * when @ptr is NULL, or when the store itself faults.
 */
#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	typeof(*(ptr)) *__pu_addr = (ptr);				\
	typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x);			\
	if (access_ok(VERIFY_WRITE, __pu_addr, size) && __pu_addr)	\
		__put_user_size(__pu_val, __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})
/*
 * Dispatch a user-space store to the size-specific asm helper.
 * @retval is cleared first and set to -EFAULT by the helper if the
 * store faults.  An unsupported size becomes a link-time error via
 * the undefined __put_user_bad().
 */
#define __put_user_size(x, ptr, size, retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		__put_user_asm_b(x, ptr, retval);	\
		break;					\
	case 2:						\
		__put_user_asm_h(x, ptr, retval);	\
		break;					\
	case 4:						\
		__put_user_asm_w(x, ptr, retval);	\
		break;					\
	case 8:						\
		__put_user_asm_64(x, ptr, retval);	\
		break;					\
	default:					\
		__put_user_bad();			\
	}						\
} while (0)
  101. /*
  102. * We don't tell gcc that we are accessing memory, but this is OK
  103. * because we do not write to any memory gcc knows about, so there
  104. * are no aliasing issues.
  105. *
  106. * Note that PC at a fault is the address *after* the faulting
  107. * instruction.
  108. */
/*
 * Store one byte of @x to user address @ptr.  The store at label 1 is
 * covered by an __ex_table entry that redirects a fault to label 2,
 * which sets @err to -EFAULT; on success @err is left unchanged.
 */
#define __put_user_asm_b(x, ptr, err)				\
do {								\
	int errcode;						\
	asm volatile(						\
	"1: stb %1, (%2,0) \n"					\
	" br 3f \n"						\
	"2: mov %0, %3 \n"					\
	" br 3f \n"						\
	".section __ex_table, \"a\" \n"				\
	".align 2 \n"						\
	".long 1b,2b \n"					\
	".previous \n"						\
	"3: \n"							\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)		\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)		\
	: "memory");						\
} while (0)
/*
 * Store one halfword (16 bits) of @x to user address @ptr.  Same
 * fixup scheme as __put_user_asm_b: a fault at label 1 jumps to
 * label 2 and sets @err to -EFAULT.
 */
#define __put_user_asm_h(x, ptr, err)				\
do {								\
	int errcode;						\
	asm volatile(						\
	"1: sth %1, (%2,0) \n"					\
	" br 3f \n"						\
	"2: mov %0, %3 \n"					\
	" br 3f \n"						\
	".section __ex_table, \"a\" \n"				\
	".align 2 \n"						\
	".long 1b,2b \n"					\
	".previous \n"						\
	"3: \n"							\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)		\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)		\
	: "memory");						\
} while (0)
/*
 * Store one word (32 bits) of @x to user address @ptr.  Same fixup
 * scheme as __put_user_asm_b: a fault at label 1 jumps to label 2
 * and sets @err to -EFAULT.
 */
#define __put_user_asm_w(x, ptr, err)				\
do {								\
	int errcode;						\
	asm volatile(						\
	"1: stw %1, (%2,0) \n"					\
	" br 3f \n"						\
	"2: mov %0, %3 \n"					\
	" br 3f \n"						\
	".section __ex_table,\"a\" \n"				\
	".align 2 \n"						\
	".long 1b, 2b \n"					\
	".previous \n"						\
	"3: \n"							\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)		\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)		\
	: "memory");						\
} while (0)
/*
 * Store a 64-bit @x to user address @ptr as two 32-bit stores.  The
 * value is first spilled to a local temporary (@src) so the asm can
 * load each half through %1.  Faults at either store (labels 1 and 2)
 * branch to label 3, which sets @err to -EFAULT.
 */
#define __put_user_asm_64(x, ptr, err)				\
do {								\
	int tmp;						\
	int errcode;						\
	typeof(*(ptr)) src = (typeof(*(ptr)))x;			\
	typeof(*(ptr)) *psrc = &src;				\
								\
	asm volatile(						\
	" ldw %3, (%1, 0) \n"					\
	"1: stw %3, (%2, 0) \n"					\
	" ldw %3, (%1, 4) \n"					\
	"2: stw %3, (%2, 4) \n"					\
	" br 4f \n"						\
	"3: mov %0, %4 \n"					\
	" br 4f \n"						\
	".section __ex_table, \"a\" \n"				\
	".align 2 \n"						\
	".long 1b, 3b \n"					\
	".long 2b, 3b \n"					\
	".previous \n"						\
	"4: \n"							\
	: "=r"(err), "=r"(psrc), "=r"(ptr),			\
	  "=r"(tmp), "=r"(errcode)				\
	: "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT)	\
	: "memory");						\
} while (0)
  186. #define __get_user_nocheck(x, ptr, size) \
  187. ({ \
  188. long __gu_err; \
  189. __get_user_size(x, (ptr), (size), __gu_err); \
  190. __gu_err; \
  191. })
  192. #define __get_user_check(x, ptr, size) \
  193. ({ \
  194. int __gu_err = -EFAULT; \
  195. const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
  196. if (access_ok(VERIFY_READ, __gu_ptr, size) && __gu_ptr) \
  197. __get_user_size(x, __gu_ptr, size, __gu_err); \
  198. __gu_err; \
  199. })
/*
 * Dispatch a user-space load to the size-appropriate instruction.
 * Only 1-, 2- and 4-byte loads are supported (unlike the put side,
 * there is no 8-byte case here); any other size zeroes @x and causes
 * a link-time error through __get_user_bad().  @retval is set by the
 * helper: 0 on success, -EFAULT on a faulting load (with @x zeroed).
 */
#define __get_user_size(x, ptr, size, retval)			\
do {								\
	switch (size) {						\
	case 1:							\
		__get_user_asm_common((x), ptr, "ldb", retval);	\
		break;						\
	case 2:							\
		__get_user_asm_common((x), ptr, "ldh", retval);	\
		break;						\
	case 4:							\
		__get_user_asm_common((x), ptr, "ldw", retval);	\
		break;						\
	default:						\
		x = 0;						\
		(retval) = __get_user_bad();			\
	}							\
} while (0)
/*
 * Common user-load asm: issue @ins (ldb/ldh/ldw) from @ptr into @x.
 * @err is seeded to 0 through the "0"(0) input constraint.  A fault
 * at label 1 is redirected by the __ex_table entry to label 2, which
 * sets @err to -EFAULT and zeroes @x so no stale data is returned.
 */
#define __get_user_asm_common(x, ptr, ins, err)		\
do {							\
	int errcode;					\
	asm volatile(					\
	"1: " ins " %1, (%4,0) \n"			\
	" br 3f \n"					\
	/* Fix up codes */				\
	"2: mov %0, %2 \n"				\
	" movi %1, 0 \n"				\
	" br 3f \n"					\
	".section __ex_table,\"a\" \n"			\
	".align 2 \n"					\
	".long 1b, 2b \n"				\
	".previous \n"					\
	"3: \n"						\
	: "=r"(err), "=r"(x), "=r"(errcode)		\
	: "0"(0), "r"(ptr), "2"(-EFAULT)		\
	: "memory");					\
} while (0)
  236. extern int __get_user_bad(void);
/*
 * Copy @n bytes from kernel @from to user @to (the stores are the
 * faulting side, covered by __ex_table entries).
 *
 * Structure: if both pointers are word-aligned ((to|from) & 3 == 0),
 * copy in 16-byte chunks (labels 1/2/9/10/11), then 4-byte words
 * (labels 3/4); the unaligned case and any tail go byte-by-byte
 * (labels 5/6).  A fault at any store branches via label 7 to the
 * exit at label 8, leaving %0 (@n) holding the bytes not copied —
 * the value the caller reports back.
 *
 * NOTE(review): on a fault inside a 16-byte chunk, @n still counts
 * the whole chunk as uncopied even if earlier stores in the chunk
 * succeeded — confirm callers tolerate this coarse remainder.
 */
#define __copy_user(to, from, n)			\
do {							\
	int w0, w1, w2, w3;				\
	asm volatile(					\
	"0: cmpnei %1, 0 \n"				\
	" bf 8f \n"					\
	" mov %3, %1 \n"				\
	" or %3, %2 \n"					\
	" andi %3, 3 \n"				\
	" cmpnei %3, 0 \n"				\
	" bf 1f \n"					\
	" br 5f \n"					\
	"1: cmplti %0, 16 \n" /* 4W */			\
	" bt 3f \n"					\
	" ldw %3, (%2, 0) \n"				\
	" ldw %4, (%2, 4) \n"				\
	" ldw %5, (%2, 8) \n"				\
	" ldw %6, (%2, 12) \n"				\
	"2: stw %3, (%1, 0) \n"				\
	"9: stw %4, (%1, 4) \n"				\
	"10: stw %5, (%1, 8) \n"			\
	"11: stw %6, (%1, 12) \n"			\
	" addi %2, 16 \n"				\
	" addi %1, 16 \n"				\
	" subi %0, 16 \n"				\
	" br 1b \n"					\
	"3: cmplti %0, 4 \n" /* 1W */			\
	" bt 5f \n"					\
	" ldw %3, (%2, 0) \n"				\
	"4: stw %3, (%1, 0) \n"				\
	" addi %2, 4 \n"				\
	" addi %1, 4 \n"				\
	" subi %0, 4 \n"				\
	" br 3b \n"					\
	"5: cmpnei %0, 0 \n" /* 1B */			\
	" bf 8f \n"					\
	" ldb %3, (%2, 0) \n"				\
	"6: stb %3, (%1, 0) \n"				\
	" addi %2, 1 \n"				\
	" addi %1, 1 \n"				\
	" subi %0, 1 \n"				\
	" br 5b \n"					\
	"7: br 8f \n"					\
	".section __ex_table, \"a\" \n"			\
	".align 2 \n"					\
	".long 2b, 7b \n"				\
	".long 9b, 7b \n"				\
	".long 10b, 7b \n"				\
	".long 11b, 7b \n"				\
	".long 4b, 7b \n"				\
	".long 6b, 7b \n"				\
	".previous \n"					\
	"8: \n"						\
	: "=r"(n), "=r"(to), "=r"(from), "=r"(w0),	\
	  "=r"(w1), "=r"(w2), "=r"(w3)			\
	: "0"(n), "1"(to), "2"(from)			\
	: "memory");					\
} while (0)
/*
 * Copy @n bytes from user @from to kernel @to (the loads are the
 * faulting side).  Same aligned fast path as __copy_user: 16-byte
 * chunks (labels 2/10/11/12), then words (label 4), bytes (label 6)
 * for the unaligned case and the tail.
 *
 * On a faulting load, the __ex_table entries branch to label 8,
 * which zero-fills the remaining destination bytes (loop at label 9)
 * before exiting — the classic copy_from_user contract that the
 * kernel buffer never ends up with uninitialized bytes.  %0 (@n)
 * retains the count of bytes that were not copied.
 */
#define __copy_user_zeroing(to, from, n)		\
do {							\
	int tmp;					\
	int nsave;					\
	asm volatile(					\
	"0: cmpnei %1, 0 \n"				\
	" bf 7f \n"					\
	" mov %3, %1 \n"				\
	" or %3, %2 \n"					\
	" andi %3, 3 \n"				\
	" cmpnei %3, 0 \n"				\
	" bf 1f \n"					\
	" br 5f \n"					\
	"1: cmplti %0, 16 \n"				\
	" bt 3f \n"					\
	"2: ldw %3, (%2, 0) \n"				\
	"10: ldw %4, (%2, 4) \n"			\
	" stw %3, (%1, 0) \n"				\
	" stw %4, (%1, 4) \n"				\
	"11: ldw %3, (%2, 8) \n"			\
	"12: ldw %4, (%2, 12) \n"			\
	" stw %3, (%1, 8) \n"				\
	" stw %4, (%1, 12) \n"				\
	" addi %2, 16 \n"				\
	" addi %1, 16 \n"				\
	" subi %0, 16 \n"				\
	" br 1b \n"					\
	"3: cmplti %0, 4 \n"				\
	" bt 5f \n"					\
	"4: ldw %3, (%2, 0) \n"				\
	" stw %3, (%1, 0) \n"				\
	" addi %2, 4 \n"				\
	" addi %1, 4 \n"				\
	" subi %0, 4 \n"				\
	" br 3b \n"					\
	"5: cmpnei %0, 0 \n"				\
	" bf 7f \n"					\
	"6: ldb %3, (%2, 0) \n"				\
	" stb %3, (%1, 0) \n"				\
	" addi %2, 1 \n"				\
	" addi %1, 1 \n"				\
	" subi %0, 1 \n"				\
	" br 5b \n"					\
	"8: mov %3, %0 \n"				\
	" movi %4, 0 \n"				\
	"9: stb %4, (%1, 0) \n"				\
	" addi %1, 1 \n"				\
	" subi %3, 1 \n"				\
	" cmpnei %3, 0 \n"				\
	" bt 9b \n"					\
	" br 7f \n"					\
	".section __ex_table, \"a\" \n"			\
	".align 2 \n"					\
	".long 2b, 8b \n"				\
	".long 10b, 8b \n"				\
	".long 11b, 8b \n"				\
	".long 12b, 8b \n"				\
	".long 4b, 8b \n"				\
	".long 6b, 8b \n"				\
	".previous \n"					\
	"7: \n"						\
	: "=r"(n), "=r"(to), "=r"(from), "=r"(nsave),	\
	  "=r"(tmp)					\
	: "0"(n), "1"(to), "2"(from)			\
	: "memory");					\
} while (0)
/*
 * Bulk user-copy primitives, implemented out of line.  Each returns
 * the number of bytes NOT transferred (0 on full success).
 */
unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);

/* Zero @n bytes of user memory; return the number of bytes not cleared.
 * The __ variant skips the access_ok() check. */
unsigned long clear_user(void *to, unsigned long n);
unsigned long __clear_user(void __user *to, unsigned long n);

/* Copy a NUL-terminated string from user space, at most @count bytes.
 * The __ variant skips the access_ok() check. */
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
long strnlen_user(const char *src, long n);

/* Unbounded-ish strlen: capped at 32767 bytes. */
#define strlen_user(str) strnlen_user(str, 32767)
/*
 * Exception-table entry, matching the .long pairs emitted into
 * __ex_table by the asm above: @insn is the address of a potentially
 * faulting instruction, @nextinsn the fixup address execution resumes
 * at when that instruction faults.
 */
struct exception_table_entry {
	unsigned long insn;
	unsigned long nextinsn;
};
  378. extern int fixup_exception(struct pt_regs *regs);
  379. #endif /* __ASM_CSKY_UACCESS_H */