uaccess.h

#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */
#define access_ok(type, uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user
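
/*
 * Illustrative sketch (not part of the original header): because
 * access_ok() always evaluates to true on parisc, the usual
 * "check, then __copy" pattern degenerates to the check always passing;
 * bad pointers are caught by the page fault handler instead. The
 * function name is hypothetical.
 */
static inline int example_user_range_ok(const void __user *uaddr,
					unsigned long size)
{
	/* always 1 on parisc; kept only for API compatibility */
	return access_ok(VERIFY_READ, uaddr, size);
}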

#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr)		__get_user_asm64(ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */
#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
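
/*
 * Illustrative sketch (not part of the original header): with
 * ARCH_HAS_RELATIVE_EXTABLE each field stores "target - &field", so an
 * absolute address is recovered by adding the field's own address back
 * in. The helper names are hypothetical.
 */
static inline unsigned long example_extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long example_extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}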

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )	\
	".section __ex_table,\"aw\"\n"				\
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zero the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
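
/*
 * Illustrative sketch (not part of the original header): the "+ 1"
 * above tags the fixup offset, so the fault handler can test bit 0 to
 * decide whether the -EFAULT/zeroing behaviour applies. The helper
 * name is hypothetical.
 */
static inline int example_extable_is_efault(const struct exception_table_entry *x)
{
	/* bit 0 of the fixup offset marks an EFAULT-style entry */
	return x->fixup & 1;
}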

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};

/*
 * load_sr2() preloads the space register %sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,= %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )

#define __get_user(x, ptr)				\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
	register long __gu_val;				\
							\
	load_sr2();					\
	switch (sizeof(*(ptr))) {			\
	    case 1: __get_user_asm("ldb", ptr); break;	\
	    case 2: __get_user_asm("ldh", ptr); break;	\
	    case 4: __get_user_asm("ldw", ptr); break;	\
	    case 8: LDD_USER(ptr); break;		\
	    default: BUILD_BUG(); break;		\
	}						\
							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;	\
	__gu_err;					\
})
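
/*
 * Illustrative usage sketch (not part of the original header); the
 * function and parameter names are hypothetical. On a faulting load the
 * EFAULT fixup leaves -EFAULT in r8 (__gu_err) and zeroes __gu_val, so
 * the caller sees a zeroed destination and a nonzero return value.
 */
static __always_inline long example_get_uint(const unsigned int __user *uptr,
					     unsigned int *dst)
{
	return __get_user(*dst, uptr);
}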

#define __get_user_asm(ldx, ptr)			\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(ptr)				\
	__asm__(" copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));

#endif /* !defined(CONFIG_64BIT) */

#define __put_user(x, ptr)					\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	load_sr2();						\
	switch (sizeof(*(ptr))) {				\
	    case 1: __put_user_asm("stb", __x, ptr); break;	\
	    case 2: __put_user_asm("sth", __x, ptr); break;	\
	    case 4: __put_user_asm("stw", __x, ptr); break;	\
	    case 8: STD_USER(__x, ptr); break;			\
	    default: BUILD_BUG(); break;			\
	}							\
								\
	__pu_err;						\
})
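
/*
 * Illustrative usage sketch (not part of the original header); the
 * function name is hypothetical. A faulting store runs the EFAULT
 * fixup, which leaves -EFAULT in r8 (__pu_err) as the macro's result.
 */
static __always_inline long example_put_uint(unsigned int __user *uptr,
					     unsigned int val)
{
	return __put_user(val, uptr);
}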

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */
#define __put_user_asm(stx, x, ptr)			\
	__asm__ __volatile__ (				\
		"1: " stx " %2,0(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(x), "0"(__pu_err))

#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {		\
	__asm__ __volatile__ (				\
		"1: stw %2,0(%%sr2,%1)\n"		\
		"2: stw %R2,4(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(__val), "0"(__pu_err));	\
} while (0)

#endif /* !defined(CONFIG_64BIT) */

/*
 * Complex access routines -- external declarations
 */
extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);

/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
					  unsigned long len);
unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src,
			   unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = n;

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		ret = __copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	if (unlikely(ret))
		memset(to + (n - ret), 0, ret);

	return ret;
}
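
/*
 * Illustrative sketch (not part of the original header); the function
 * name is hypothetical. copy_from_user() returns the number of bytes it
 * could NOT copy and zero-fills that tail, so a partial fault never
 * leaves uninitialized kernel memory behind.
 */
static inline int example_fetch_buf(void *kbuf, const void __user *ubuf,
				    unsigned long len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;	/* kbuf tail was zeroed on partial copy */
	return 0;
}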

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = __copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
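
/*
 * Illustrative sketch (not part of the original header); the function
 * name is hypothetical. Like copy_from_user(), copy_to_user() reports
 * the number of uncopied bytes, which callers usually fold into
 * -EFAULT.
 */
static inline int example_store_buf(void __user *ubuf, const void *kbuf,
				    unsigned long len)
{
	return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
}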

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */