uaccess_64.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>

#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/extable_64.h>

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */

#define get_fs()	((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds()	(KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)							\
do {									\
	current_thread_info()->current_ds = (val).seg;			\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
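
/*
 * Illustrative sketch (not part of this header): the historical
 * pattern these helpers exist to support is temporarily widening
 * the "segment" so a kernel buffer can be handed to code that
 * performs user accesses:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_routine_taking_a_user_pointer(kbuf);
 *	set_fs(old_fs);
 *
 * (some_routine_taking_a_user_pointer is a hypothetical caller.)
 * On sparc64 this works by rewriting the %asi register, so the
 * lda/sta instructions in the accessors below target either the
 * kernel (ASI_P) or the user (ASI_AIUS) address space.
 */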
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (addr < size)
		return true;
	return addr > limit;
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})
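
/*
 * A worked example of the overflow check above (illustrative):
 * with addr = 0xfffffffffffffff8 and size = 0x10, addr + size
 * wraps around to 0x8, which is < size, so the range is rejected
 * rather than appearing to end below the limit.
 */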
static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({						\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__chk_user_ptr(ptr);						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({						\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	__chk_user_ptr(ptr);						\
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
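
/*
 * Typical usage (illustrative sketch): both macros evaluate to 0 on
 * success and -EFAULT if the access faults, with the transfer width
 * picked from the pointer's type:
 *
 *	u32 __user *uptr;
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */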
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;						\
	switch (size) {							\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	__pu_ret;							\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);
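
/*
 * How the fixup in __put_user_asm() works: if the store at label 1
 * faults, the __ex_table entry ("1b, 3b") sends the trap handler to
 * the fixup code at label 3, which loads -EFAULT into the return
 * register and jumps back to label 2, just past the access.  On the
 * non-faulting path, the fall-through "clr %0" yields a return value
 * of 0.  __get_user_asm() below uses the same scheme, additionally
 * clearing the destination register on a fault.
 */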
#define __get_user_nocheck(data, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	data = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))
int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
					      const void __user *from,
					      unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					    const void *from,
					    unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
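
/*
 * The INLINE_COPY_* defines ask the generic linux/uaccess.h wrappers
 * to be emitted inline around the raw routines above.  Typical usage
 * (illustrative sketch); the raw routines return the number of bytes
 * NOT copied, so zero means success:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */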
unsigned long __must_check raw_copy_in_user(void __user *to,
					    const void __user *from,
					    unsigned long size);

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);
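
/*
 * Illustrative sketch of the remaining helpers: clear_user() returns
 * the number of bytes left unzeroed (0 on success), and strnlen_user()
 * returns the string length including the NUL terminator, or 0 if a
 * fault occurs:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *
 *	len = strnlen_user(ustr, PAGE_SIZE);
 *	if (!len)
 *		return -EFAULT;
 */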
struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */