#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
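
/*
 * The address limit on parisc is only a kernel/user selector: 0 selects
 * kernel space and 1 selects user space, as consumed by load_sr2() below.
 */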
#define KERNEL_DS ((mm_segment_t){0})
#define USER_DS ((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */
static inline long access_ok(int type, const void __user * addr,
                             unsigned long size)
{
        return 1;
}

#define put_user __put_user
#define get_user __get_user
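
/*
 * 64-bit accesses: a single ldd/std on 64-bit kernels, two 32-bit
 * accesses on 32-bit kernels (see __get_user_asm64/__put_user_asm64).
 */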
#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr) __get_user_asm64(ptr)
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on 64bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
        int insn;       /* relative address of insn that is allowed to fault. */
        int fixup;      /* relative address of fixup routine */
};
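
/*
 * The absolute addresses are recovered by adding each field's value to the
 * address of the field itself, e.g. the faulting instruction lives at
 * (unsigned long)&entry->insn + entry->insn.
 */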

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
        ".section __ex_table,\"aw\"\n"                     \
        ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
        ".previous\n"

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
        unsigned long fault_ip;
        unsigned long fault_gp;
        unsigned long fault_space;
        unsigned long fault_addr;
};

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 */
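/*
 * (The "or,=" below nullifies the following mfsp when get_fs() is zero,
 * i.e. KERNEL_DS, so %%sr2 is loaded with 0; otherwise %%sr2 receives a
 * copy of %%sr3.)
 */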
#define load_sr2() \
        __asm__(" or,= %0,%%r0,%%r0\n\t"        \
                " mfsp %%sr3,%0\n\t"            \
                " mtsp %0,%%sr2\n\t"            \
                : : "r"(get_fs()) : )
#define __get_user(x, ptr)                              \
({                                                      \
        register long __gu_err __asm__ ("r8") = 0;      \
        register long __gu_val __asm__ ("r9") = 0;      \
                                                        \
        load_sr2();                                     \
        switch (sizeof(*(ptr))) {                       \
            case 1: __get_user_asm("ldb", ptr); break;  \
            case 2: __get_user_asm("ldh", ptr); break;  \
            case 4: __get_user_asm("ldw", ptr); break;  \
            case 8: LDD_USER(ptr); break;               \
            default: BUILD_BUG(); break;                \
        }                                               \
                                                        \
        (x) = (__force __typeof__(*(ptr))) __gu_val;    \
        __gu_err;                                       \
})

#define __get_user_asm(ldx, ptr)                        \
        __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"     \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "r"(ptr), "1"(__gu_err)               \
                : "r1");
#if !defined(CONFIG_64BIT)
#define __get_user_asm64(ptr)                           \
        __asm__("\n1:\tldw 0(%%sr2,%2),%0"              \
                "\n2:\tldw 4(%%sr2,%2),%R0\n\t"         \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
                ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "r"(ptr), "1"(__gu_err)               \
                : "r1");
#endif /* !defined(CONFIG_64BIT) */

#define __put_user(x, ptr)                              \
({                                                      \
        register long __pu_err __asm__ ("r8") = 0;      \
        __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);\
                                                        \
        load_sr2();                                     \
        switch (sizeof(*(ptr))) {                       \
            case 1: __put_user_asm("stb", __x, ptr); break; \
            case 2: __put_user_asm("sth", __x, ptr); break; \
            case 4: __put_user_asm("stw", __x, ptr); break; \
            case 8: STD_USER(__x, ptr); break;          \
            default: BUILD_BUG(); break;                \
        }                                               \
                                                        \
        __pu_err;                                       \
})

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that "fixup_put_user_skip_[12]" are executed in the
 * context of the fault, and any registers used there must be listed
 * as clobbers. In this case only "r1" is used by the current routines.
 * r8/r9 are already listed as err/val.
 */

#define __put_user_asm(stx, x, ptr)                     \
        __asm__ __volatile__ (                          \
                "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"     \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
                : "=r"(__pu_err)                        \
                : "r"(ptr), "r"(x), "0"(__pu_err)       \
                : "r1")

#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do {               \
        __asm__ __volatile__ (                          \
                "\n1:\tstw %2,0(%%sr2,%1)"              \
                "\n2:\tstw %R2,4(%%sr2,%1)\n\t"         \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
                ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
                : "=r"(__pu_err)                        \
                : "r"(ptr), "r"(__val), "0"(__pu_err)   \
                : "r1");                                \
} while (0)
#endif /* !defined(CONFIG_64BIT) */

/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);

/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
#define __copy_to_user copy_to_user
unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
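
/*
 * Hardened copy_from_user(): when the destination object size is known at
 * compile time, copies larger than the object either warn at runtime (for
 * non-constant lengths) or fail the build. Any bytes that could not be
 * copied are zeroed so stale kernel data is not leaked to the caller.
 */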
static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
{
        int sz = __compiletime_object_size(to);
        unsigned long ret = n;

        if (likely(sz == -1 || sz >= n))
                ret = __copy_from_user(to, from, n);
        else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        if (unlikely(ret))
                memset(to + (n - ret), 0, ret);

        return ret;
}

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */