/*
 * Standard user space access functions based on mvcp/mvcs and doing
 * interesting things in the secondary space mode.
 *
 * Copyright IBM Corp. 2006,2014
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
  9. #include <linux/jump_label.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/export.h>
  12. #include <linux/errno.h>
  13. #include <linux/mm.h>
  14. #include <asm/mmu_context.h>
  15. #include <asm/facility.h>
  16. #ifndef CONFIG_64BIT
  17. #define AHI "ahi"
  18. #define ALR "alr"
  19. #define CLR "clr"
  20. #define LHI "lhi"
  21. #define SLR "slr"
  22. #else
  23. #define AHI "aghi"
  24. #define ALR "algr"
  25. #define CLR "clgr"
  26. #define LHI "lghi"
  27. #define SLR "slgr"
  28. #endif
  29. static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
  30. static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
  31. unsigned long size)
  32. {
  33. register unsigned long reg0 asm("0") = 0x81UL;
  34. unsigned long tmp1, tmp2;
  35. tmp1 = -4096UL;
  36. asm volatile(
  37. "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
  38. "9: jz 7f\n"
  39. "1:"ALR" %0,%3\n"
  40. " "SLR" %1,%3\n"
  41. " "SLR" %2,%3\n"
  42. " j 0b\n"
  43. "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
  44. " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
  45. " "SLR" %4,%1\n"
  46. " "CLR" %0,%4\n" /* copy crosses next page boundary? */
  47. " jnh 4f\n"
  48. "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
  49. "10:"SLR" %0,%4\n"
  50. " "ALR" %2,%4\n"
  51. "4:"LHI" %4,-1\n"
  52. " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
  53. " bras %3,6f\n" /* memset loop */
  54. " xc 0(1,%2),0(%2)\n"
  55. "5: xc 0(256,%2),0(%2)\n"
  56. " la %2,256(%2)\n"
  57. "6:"AHI" %4,-256\n"
  58. " jnm 5b\n"
  59. " ex %4,0(%3)\n"
  60. " j 8f\n"
  61. "7:"SLR" %0,%0\n"
  62. "8:\n"
  63. EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
  64. : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
  65. : "d" (reg0) : "cc", "memory");
  66. return size;
  67. }
  68. static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
  69. unsigned long size)
  70. {
  71. unsigned long tmp1, tmp2;
  72. load_kernel_asce();
  73. tmp1 = -256UL;
  74. asm volatile(
  75. " sacf 0\n"
  76. "0: mvcp 0(%0,%2),0(%1),%3\n"
  77. "10:jz 8f\n"
  78. "1:"ALR" %0,%3\n"
  79. " la %1,256(%1)\n"
  80. " la %2,256(%2)\n"
  81. "2: mvcp 0(%0,%2),0(%1),%3\n"
  82. "11:jnz 1b\n"
  83. " j 8f\n"
  84. "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
  85. " "LHI" %3,-4096\n"
  86. " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
  87. " "SLR" %4,%1\n"
  88. " "CLR" %0,%4\n" /* copy crosses next page boundary? */
  89. " jnh 5f\n"
  90. "4: mvcp 0(%4,%2),0(%1),%3\n"
  91. "12:"SLR" %0,%4\n"
  92. " "ALR" %2,%4\n"
  93. "5:"LHI" %4,-1\n"
  94. " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
  95. " bras %3,7f\n" /* memset loop */
  96. " xc 0(1,%2),0(%2)\n"
  97. "6: xc 0(256,%2),0(%2)\n"
  98. " la %2,256(%2)\n"
  99. "7:"AHI" %4,-256\n"
  100. " jnm 6b\n"
  101. " ex %4,0(%3)\n"
  102. " j 9f\n"
  103. "8:"SLR" %0,%0\n"
  104. "9: sacf 768\n"
  105. EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
  106. EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
  107. : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
  108. : : "cc", "memory");
  109. return size;
  110. }
  111. unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
  112. {
  113. if (static_key_false(&have_mvcos))
  114. return copy_from_user_mvcos(to, from, n);
  115. return copy_from_user_mvcp(to, from, n);
  116. }
  117. EXPORT_SYMBOL(__copy_from_user);
  118. static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
  119. unsigned long size)
  120. {
  121. register unsigned long reg0 asm("0") = 0x810000UL;
  122. unsigned long tmp1, tmp2;
  123. tmp1 = -4096UL;
  124. asm volatile(
  125. "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
  126. "6: jz 4f\n"
  127. "1:"ALR" %0,%3\n"
  128. " "SLR" %1,%3\n"
  129. " "SLR" %2,%3\n"
  130. " j 0b\n"
  131. "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
  132. " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
  133. " "SLR" %4,%1\n"
  134. " "CLR" %0,%4\n" /* copy crosses next page boundary? */
  135. " jnh 5f\n"
  136. "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
  137. "7:"SLR" %0,%4\n"
  138. " j 5f\n"
  139. "4:"SLR" %0,%0\n"
  140. "5:\n"
  141. EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
  142. : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
  143. : "d" (reg0) : "cc", "memory");
  144. return size;
  145. }
  146. static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
  147. unsigned long size)
  148. {
  149. unsigned long tmp1, tmp2;
  150. load_kernel_asce();
  151. tmp1 = -256UL;
  152. asm volatile(
  153. " sacf 0\n"
  154. "0: mvcs 0(%0,%1),0(%2),%3\n"
  155. "7: jz 5f\n"
  156. "1:"ALR" %0,%3\n"
  157. " la %1,256(%1)\n"
  158. " la %2,256(%2)\n"
  159. "2: mvcs 0(%0,%1),0(%2),%3\n"
  160. "8: jnz 1b\n"
  161. " j 5f\n"
  162. "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
  163. " "LHI" %3,-4096\n"
  164. " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
  165. " "SLR" %4,%1\n"
  166. " "CLR" %0,%4\n" /* copy crosses next page boundary? */
  167. " jnh 6f\n"
  168. "4: mvcs 0(%4,%1),0(%2),%3\n"
  169. "9:"SLR" %0,%4\n"
  170. " j 6f\n"
  171. "5:"SLR" %0,%0\n"
  172. "6: sacf 768\n"
  173. EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
  174. EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
  175. : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
  176. : : "cc", "memory");
  177. return size;
  178. }
  179. unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
  180. {
  181. if (static_key_false(&have_mvcos))
  182. return copy_to_user_mvcos(to, from, n);
  183. return copy_to_user_mvcs(to, from, n);
  184. }
  185. EXPORT_SYMBOL(__copy_to_user);
  186. static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
  187. unsigned long size)
  188. {
  189. register unsigned long reg0 asm("0") = 0x810081UL;
  190. unsigned long tmp1, tmp2;
  191. tmp1 = -4096UL;
  192. /* FIXME: copy with reduced length. */
  193. asm volatile(
  194. "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
  195. " jz 2f\n"
  196. "1:"ALR" %0,%3\n"
  197. " "SLR" %1,%3\n"
  198. " "SLR" %2,%3\n"
  199. " j 0b\n"
  200. "2:"SLR" %0,%0\n"
  201. "3: \n"
  202. EX_TABLE(0b,3b)
  203. : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
  204. : "d" (reg0) : "cc", "memory");
  205. return size;
  206. }
  207. static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
  208. unsigned long size)
  209. {
  210. unsigned long tmp1;
  211. load_kernel_asce();
  212. asm volatile(
  213. " sacf 256\n"
  214. " "AHI" %0,-1\n"
  215. " jo 5f\n"
  216. " bras %3,3f\n"
  217. "0:"AHI" %0,257\n"
  218. "1: mvc 0(1,%1),0(%2)\n"
  219. " la %1,1(%1)\n"
  220. " la %2,1(%2)\n"
  221. " "AHI" %0,-1\n"
  222. " jnz 1b\n"
  223. " j 5f\n"
  224. "2: mvc 0(256,%1),0(%2)\n"
  225. " la %1,256(%1)\n"
  226. " la %2,256(%2)\n"
  227. "3:"AHI" %0,-256\n"
  228. " jnm 2b\n"
  229. "4: ex %0,1b-0b(%3)\n"
  230. "5: "SLR" %0,%0\n"
  231. "6: sacf 768\n"
  232. EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
  233. : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
  234. : : "cc", "memory");
  235. return size;
  236. }
  237. unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
  238. {
  239. if (static_key_false(&have_mvcos))
  240. return copy_in_user_mvcos(to, from, n);
  241. return copy_in_user_mvc(to, from, n);
  242. }
  243. EXPORT_SYMBOL(__copy_in_user);
  244. static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
  245. {
  246. register unsigned long reg0 asm("0") = 0x810000UL;
  247. unsigned long tmp1, tmp2;
  248. tmp1 = -4096UL;
  249. asm volatile(
  250. "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
  251. " jz 4f\n"
  252. "1:"ALR" %0,%2\n"
  253. " "SLR" %1,%2\n"
  254. " j 0b\n"
  255. "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
  256. " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
  257. " "SLR" %3,%1\n"
  258. " "CLR" %0,%3\n" /* copy crosses next page boundary? */
  259. " jnh 5f\n"
  260. "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
  261. " "SLR" %0,%3\n"
  262. " j 5f\n"
  263. "4:"SLR" %0,%0\n"
  264. "5:\n"
  265. EX_TABLE(0b,2b) EX_TABLE(3b,5b)
  266. : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
  267. : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
  268. return size;
  269. }
  270. static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
  271. {
  272. unsigned long tmp1, tmp2;
  273. load_kernel_asce();
  274. asm volatile(
  275. " sacf 256\n"
  276. " "AHI" %0,-1\n"
  277. " jo 5f\n"
  278. " bras %3,3f\n"
  279. " xc 0(1,%1),0(%1)\n"
  280. "0:"AHI" %0,257\n"
  281. " la %2,255(%1)\n" /* %2 = ptr + 255 */
  282. " srl %2,12\n"
  283. " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
  284. " "SLR" %2,%1\n"
  285. " "CLR" %0,%2\n" /* clear crosses next page boundary? */
  286. " jnh 5f\n"
  287. " "AHI" %2,-1\n"
  288. "1: ex %2,0(%3)\n"
  289. " "AHI" %2,1\n"
  290. " "SLR" %0,%2\n"
  291. " j 5f\n"
  292. "2: xc 0(256,%1),0(%1)\n"
  293. " la %1,256(%1)\n"
  294. "3:"AHI" %0,-256\n"
  295. " jnm 2b\n"
  296. "4: ex %0,0(%3)\n"
  297. "5: "SLR" %0,%0\n"
  298. "6: sacf 768\n"
  299. EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
  300. : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
  301. : : "cc", "memory");
  302. return size;
  303. }
  304. unsigned long __clear_user(void __user *to, unsigned long size)
  305. {
  306. if (static_key_false(&have_mvcos))
  307. return clear_user_mvcos(to, size);
  308. return clear_user_xc(to, size);
  309. }
  310. EXPORT_SYMBOL(__clear_user);
  311. static inline unsigned long strnlen_user_srst(const char __user *src,
  312. unsigned long size)
  313. {
  314. register unsigned long reg0 asm("0") = 0;
  315. unsigned long tmp1, tmp2;
  316. asm volatile(
  317. " la %2,0(%1)\n"
  318. " la %3,0(%0,%1)\n"
  319. " "SLR" %0,%0\n"
  320. " sacf 256\n"
  321. "0: srst %3,%2\n"
  322. " jo 0b\n"
  323. " la %0,1(%3)\n" /* strnlen_user results includes \0 */
  324. " "SLR" %0,%1\n"
  325. "1: sacf 768\n"
  326. EX_TABLE(0b,1b)
  327. : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
  328. : "d" (reg0) : "cc", "memory");
  329. return size;
  330. }
  331. unsigned long __strnlen_user(const char __user *src, unsigned long size)
  332. {
  333. if (unlikely(!size))
  334. return 0;
  335. load_kernel_asce();
  336. return strnlen_user_srst(src, size);
  337. }
  338. EXPORT_SYMBOL(__strnlen_user);
  339. long __strncpy_from_user(char *dst, const char __user *src, long size)
  340. {
  341. size_t done, len, offset, len_str;
  342. if (unlikely(size <= 0))
  343. return 0;
  344. done = 0;
  345. do {
  346. offset = (size_t)src & ~PAGE_MASK;
  347. len = min(size - done, PAGE_SIZE - offset);
  348. if (copy_from_user(dst, src, len))
  349. return -EFAULT;
  350. len_str = strnlen(dst, len);
  351. done += len_str;
  352. src += len_str;
  353. dst += len_str;
  354. } while ((len_str == len) && (done < size));
  355. return done;
  356. }
  357. EXPORT_SYMBOL(__strncpy_from_user);
  358. /*
  359. * The "old" uaccess variant without mvcos can be enforced with the
  360. * uaccess_primary kernel parameter. This is mainly for debugging purposes.
  361. */
  362. static int uaccess_primary __initdata;
  363. static int __init parse_uaccess_pt(char *__unused)
  364. {
  365. uaccess_primary = 1;
  366. return 0;
  367. }
  368. early_param("uaccess_primary", parse_uaccess_pt);
  369. static int __init uaccess_init(void)
  370. {
  371. if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
  372. static_key_slow_inc(&have_mvcos);
  373. return 0;
  374. }
  375. early_initcall(uaccess_init);