ptrace.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351
  1. /*
  2. * Ptrace user space interface.
  3. *
  4. * Copyright IBM Corp. 1999, 2010
  5. * Author(s): Denis Joseph Barrow
  6. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/mm.h>
  11. #include <linux/smp.h>
  12. #include <linux/errno.h>
  13. #include <linux/ptrace.h>
  14. #include <linux/user.h>
  15. #include <linux/security.h>
  16. #include <linux/audit.h>
  17. #include <linux/signal.h>
  18. #include <linux/elf.h>
  19. #include <linux/regset.h>
  20. #include <linux/tracehook.h>
  21. #include <linux/seccomp.h>
  22. #include <linux/compat.h>
  23. #include <trace/syscall.h>
  24. #include <asm/segment.h>
  25. #include <asm/page.h>
  26. #include <asm/pgtable.h>
  27. #include <asm/pgalloc.h>
  28. #include <asm/uaccess.h>
  29. #include <asm/unistd.h>
  30. #include <asm/switch_to.h>
  31. #include "entry.h"
  32. #ifdef CONFIG_COMPAT
  33. #include "compat_ptrace.h"
  34. #endif
  35. #define CREATE_TRACE_POINTS
  36. #include <trace/events/syscalls.h>
/*
 * Index of each user_regset exported for this architecture.
 * NOTE(review): presumably this order must match the regset array
 * (s390_regsets[]/the compat variant) defined later in this file —
 * confirm before reordering.
 */
enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_TDB,
	REGSET_SYSTEM_CALL,
	REGSET_GENERAL_EXTENDED,
};
  45. void update_cr_regs(struct task_struct *task)
  46. {
  47. struct pt_regs *regs = task_pt_regs(task);
  48. struct thread_struct *thread = &task->thread;
  49. struct per_regs old, new;
  50. #ifdef CONFIG_64BIT
  51. /* Take care of the enable/disable of transactional execution. */
  52. if (MACHINE_HAS_TE) {
  53. unsigned long cr, cr_new;
  54. __ctl_store(cr, 0, 0);
  55. /* Set or clear transaction execution TXC bit 8. */
  56. cr_new = cr | (1UL << 55);
  57. if (task->thread.per_flags & PER_FLAG_NO_TE)
  58. cr_new &= ~(1UL << 55);
  59. if (cr_new != cr)
  60. __ctl_load(cr, 0, 0);
  61. /* Set or clear transaction execution TDC bits 62 and 63. */
  62. __ctl_store(cr, 2, 2);
  63. cr_new = cr & ~3UL;
  64. if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
  65. if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
  66. cr_new |= 1UL;
  67. else
  68. cr_new |= 2UL;
  69. }
  70. if (cr_new != cr)
  71. __ctl_load(cr_new, 2, 2);
  72. }
  73. #endif
  74. /* Copy user specified PER registers */
  75. new.control = thread->per_user.control;
  76. new.start = thread->per_user.start;
  77. new.end = thread->per_user.end;
  78. /* merge TIF_SINGLE_STEP into user specified PER registers. */
  79. if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
  80. new.control |= PER_EVENT_IFETCH;
  81. #ifdef CONFIG_64BIT
  82. new.control |= PER_CONTROL_SUSPENSION;
  83. new.control |= PER_EVENT_TRANSACTION_END;
  84. #endif
  85. new.start = 0;
  86. new.end = PSW_ADDR_INSN;
  87. }
  88. /* Take care of the PER enablement bit in the PSW. */
  89. if (!(new.control & PER_EVENT_MASK)) {
  90. regs->psw.mask &= ~PSW_MASK_PER;
  91. return;
  92. }
  93. regs->psw.mask |= PSW_MASK_PER;
  94. __ctl_store(old, 9, 11);
  95. if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
  96. __ctl_load(new, 9, 11);
  97. }
/*
 * Request hardware single stepping for @task; the flag is turned into
 * real PER controls by update_cr_regs() when the task is (re)scheduled.
 */
void user_enable_single_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
/* Revert user_enable_single_step(): stop hardware single stepping. */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields: the user-specified and the
 * recorded PER state, single stepping, any pending PER trap, and the
 * per-task ptrace flags (TE enable/abort-rand settings).
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
	task->thread.per_flags = 0;
}
  119. #ifndef CONFIG_64BIT
  120. # define __ADDR_MASK 3
  121. #else
  122. # define __ADDR_MASK 7
  123. #endif
/*
 * __peek_user_per - read one word from the PER part of struct user.
 *
 * @addr is the byte offset into struct per_struct_kernel (already
 * rebased by __peek_user()). The NULL "dummy" pointer is used only to
 * compute field offsets with &dummy->field; it is never dereferenced.
 *
 * cr9-cr11 and the single-step bit report the *active* PER set: while
 * TIF_SINGLE_STEP is set the kernel overrides the user-specified PER
 * controls (see update_cr_regs()), so matching fake values are
 * returned instead. starting_addr/ending_addr always report what the
 * debugger itself wrote. Unknown offsets read as zero.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		/* (left-justified in the returned word) */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 *
 * @addr must already be validated/aligned by the caller (peek_user()).
 * The NULL "dummy" pointer is only used for offset arithmetic.
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}
	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		/* fpc is a 32 bit value: left-justify it in the word. */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp <<= BITS_PER_LONG - 32;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);
	} else
		tmp = 0;

	return tmp;
}
/*
 * peek_user - PTRACE_PEEKUSR: validate addr, then copy one word of the
 * user area of @child to the tracer's buffer at @data.
 *
 * Returns -EIO for a misaligned or out-of-range addr, otherwise the
 * result of put_user() (0 or -EFAULT).
 */
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	/* Allow 4-byte alignment inside the 32 bit acrs array. */
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
/*
 * __poke_user_per - write one word into the PER part of struct user.
 * @addr is the byte offset into struct per_struct_kernel (rebased by
 * __poke_user()); "dummy" is only used for offset arithmetic.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 *
 * Returns 0 on success (writes to padding are silently accepted) or
 * -EINVAL for an invalid psw mask / fp control value.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			/* Only user-changeable bits may differ. */
			if ((data & ~mask) != PSW_USER_BITS)
				return -EINVAL;
			/* EA (64 bit addressing) requires BA to be set too. */
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		/* fpc sits in the upper 32 bits; lower half must be zero. */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			if ((unsigned int) data != 0 ||
			    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
				return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);
	}

	return 0;
}
/*
 * poke_user - PTRACE_POKEUSR: validate addr, then write one word into
 * the user area of @child. Returns -EIO for a misaligned or
 * out-of-range addr, otherwise __poke_user()'s result.
 */
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	/* Allow 4-byte alignment inside the 32 bit acrs array. */
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
  364. long arch_ptrace(struct task_struct *child, long request,
  365. unsigned long addr, unsigned long data)
  366. {
  367. ptrace_area parea;
  368. int copied, ret;
  369. switch (request) {
  370. case PTRACE_PEEKUSR:
  371. /* read the word at location addr in the USER area. */
  372. return peek_user(child, addr, data);
  373. case PTRACE_POKEUSR:
  374. /* write the word at location addr in the USER area */
  375. return poke_user(child, addr, data);
  376. case PTRACE_PEEKUSR_AREA:
  377. case PTRACE_POKEUSR_AREA:
  378. if (copy_from_user(&parea, (void __force __user *) addr,
  379. sizeof(parea)))
  380. return -EFAULT;
  381. addr = parea.kernel_addr;
  382. data = parea.process_addr;
  383. copied = 0;
  384. while (copied < parea.len) {
  385. if (request == PTRACE_PEEKUSR_AREA)
  386. ret = peek_user(child, addr, data);
  387. else {
  388. addr_t utmp;
  389. if (get_user(utmp,
  390. (addr_t __force __user *) data))
  391. return -EFAULT;
  392. ret = poke_user(child, addr, utmp);
  393. }
  394. if (ret)
  395. return ret;
  396. addr += sizeof(unsigned long);
  397. data += sizeof(unsigned long);
  398. copied += sizeof(unsigned long);
  399. }
  400. return 0;
  401. case PTRACE_GET_LAST_BREAK:
  402. put_user(task_thread_info(child)->last_break,
  403. (unsigned long __user *) data);
  404. return 0;
  405. case PTRACE_ENABLE_TE:
  406. if (!MACHINE_HAS_TE)
  407. return -EIO;
  408. child->thread.per_flags &= ~PER_FLAG_NO_TE;
  409. return 0;
  410. case PTRACE_DISABLE_TE:
  411. if (!MACHINE_HAS_TE)
  412. return -EIO;
  413. child->thread.per_flags |= PER_FLAG_NO_TE;
  414. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
  415. return 0;
  416. case PTRACE_TE_ABORT_RAND:
  417. if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
  418. return -EIO;
  419. switch (data) {
  420. case 0UL:
  421. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
  422. break;
  423. case 1UL:
  424. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
  425. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
  426. break;
  427. case 2UL:
  428. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
  429. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
  430. break;
  431. default:
  432. return -EINVAL;
  433. }
  434. return 0;
  435. default:
  436. /* Removing high order bit from addr (only for 31 bit). */
  437. addr &= PSW_ADDR_INSN;
  438. return ptrace_request(child, request, addr, data);
  439. }
  440. }
  441. #ifdef CONFIG_COMPAT
  442. /*
  443. * Now the fun part starts... a 31 bit program running in the
  444. * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
  445. * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
  446. * to handle, the difference to the 64 bit versions of the requests
  447. * is that the access is done in multiples of 4 byte instead of
  448. * 8 bytes (sizeof(unsigned long) on 31/64 bit).
  449. * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
  450. * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
  451. * is a 31 bit program too, the content of struct user can be
  452. * emulated. A 31 bit program peeking into the struct user of
  453. * a 64 bit program is a no-no.
  454. */
/*
 * Same as peek_user_per but for a 31 bit program.
 *
 * @addr is the byte offset into struct compat_per_struct_kernel; the
 * NULL "dummy32" pointer is only used for offset arithmetic.
 *
 * NOTE(review): in "(__u32) test_thread_flag(...) ? a : b" the cast
 * binds to the condition, not to the selected value. Harmless here
 * since the result is converted to __u32 by the return anyway, but
 * worth confirming against the intended style (cr11 has no cast at
 * all).
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
/*
 * Same as peek_user but for a 31 bit program.
 *
 * The 64 bit psw of the traced task is translated into a fake 31 bit
 * psw (mask shifted down, amode bit folded into the address word);
 * gprs are read as the low 32 bits of the 64 bit registers.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15: low halves of the 64 bit registers. */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);
	} else
		tmp = 0;

	return tmp;
}
  554. static int peek_user_compat(struct task_struct *child,
  555. addr_t addr, addr_t data)
  556. {
  557. __u32 tmp;
  558. if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
  559. return -EIO;
  560. tmp = __peek_user_compat(child, addr);
  561. return put_user(tmp, (__u32 __user *) data);
  562. }
/*
 * Same as poke_user_per but for a 31 bit program.
 *
 * Only cr9 (PER event mask), starting_addr and ending_addr are
 * writable; stores to any other per_info offset are ignored.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Same as poke_user but for a 31 bit program.
 *
 * 31 bit psw mask/address writes are expanded into the 64 bit psw of
 * the traced task (mask shifted up, amode bit moved into PSW_MASK_BA).
 * Returns 0 on success or -EINVAL for an invalid psw mask / fp control.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp & ~mask) != PSW32_USER_BITS)
				/* Invalid psw mask. */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15: store into the low 32 bits. */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    test_fp_ctl(tmp))
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
/*
 * poke_user_compat - PTRACE_POKEUSR for a 31 bit tracer/tracee pair:
 * validate addr against the compat user-area layout, then write one
 * 32 bit word. Returns -EIO for bad addr, else __poke_user_compat().
 */
static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}
  658. long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
  659. compat_ulong_t caddr, compat_ulong_t cdata)
  660. {
  661. unsigned long addr = caddr;
  662. unsigned long data = cdata;
  663. compat_ptrace_area parea;
  664. int copied, ret;
  665. switch (request) {
  666. case PTRACE_PEEKUSR:
  667. /* read the word at location addr in the USER area. */
  668. return peek_user_compat(child, addr, data);
  669. case PTRACE_POKEUSR:
  670. /* write the word at location addr in the USER area */
  671. return poke_user_compat(child, addr, data);
  672. case PTRACE_PEEKUSR_AREA:
  673. case PTRACE_POKEUSR_AREA:
  674. if (copy_from_user(&parea, (void __force __user *) addr,
  675. sizeof(parea)))
  676. return -EFAULT;
  677. addr = parea.kernel_addr;
  678. data = parea.process_addr;
  679. copied = 0;
  680. while (copied < parea.len) {
  681. if (request == PTRACE_PEEKUSR_AREA)
  682. ret = peek_user_compat(child, addr, data);
  683. else {
  684. __u32 utmp;
  685. if (get_user(utmp,
  686. (__u32 __force __user *) data))
  687. return -EFAULT;
  688. ret = poke_user_compat(child, addr, utmp);
  689. }
  690. if (ret)
  691. return ret;
  692. addr += sizeof(unsigned int);
  693. data += sizeof(unsigned int);
  694. copied += sizeof(unsigned int);
  695. }
  696. return 0;
  697. case PTRACE_GET_LAST_BREAK:
  698. put_user(task_thread_info(child)->last_break,
  699. (unsigned int __user *) data);
  700. return 0;
  701. }
  702. return compat_ptrace_request(child, request, addr, data);
  703. }
  704. #endif
/*
 * do_syscall_trace_enter - syscall-entry tracing hook, called from the
 * sysc_tracesys path in entry.S.
 *
 * Order matters: seccomp first (and it must short-circuit the other
 * hooks on denial), then the ptrace tracehook, then tracepoint and
 * audit. Returns the system call number to execute (from gprs[2]) or
 * -1 to skip the system call.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing(regs->gprs[2])) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
	}

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_thread_flag(TIF_SYSCALL);
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	audit_syscall_entry(is_compat_task() ?
				AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
			    regs->gprs[2], regs->orig_gpr2,
			    regs->gprs[3], regs->gprs[4],
			    regs->gprs[5]);
out:
	/* ret == 0: run the syscall; ret == -1: skip it. */
	return ret ?: regs->gprs[2];
}
/*
 * do_syscall_trace_exit - syscall-exit tracing hook: report the result
 * to audit, the exit tracepoint and (if ptraced) the tracer, in that
 * order.
 */
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
  747. /*
  748. * user_regset definitions.
  749. */
  750. static int s390_regs_get(struct task_struct *target,
  751. const struct user_regset *regset,
  752. unsigned int pos, unsigned int count,
  753. void *kbuf, void __user *ubuf)
  754. {
  755. if (target == current)
  756. save_access_regs(target->thread.acrs);
  757. if (kbuf) {
  758. unsigned long *k = kbuf;
  759. while (count > 0) {
  760. *k++ = __peek_user(target, pos);
  761. count -= sizeof(*k);
  762. pos += sizeof(*k);
  763. }
  764. } else {
  765. unsigned long __user *u = ubuf;
  766. while (count > 0) {
  767. if (__put_user(__peek_user(target, pos), u++))
  768. return -EFAULT;
  769. count -= sizeof(*u);
  770. pos += sizeof(*u);
  771. }
  772. }
  773. return 0;
  774. }
  775. static int s390_regs_set(struct task_struct *target,
  776. const struct user_regset *regset,
  777. unsigned int pos, unsigned int count,
  778. const void *kbuf, const void __user *ubuf)
  779. {
  780. int rc = 0;
  781. if (target == current)
  782. save_access_regs(target->thread.acrs);
  783. if (kbuf) {
  784. const unsigned long *k = kbuf;
  785. while (count > 0 && !rc) {
  786. rc = __poke_user(target, pos, *k++);
  787. count -= sizeof(*k);
  788. pos += sizeof(*k);
  789. }
  790. } else {
  791. const unsigned long __user *u = ubuf;
  792. while (count > 0 && !rc) {
  793. unsigned long word;
  794. rc = __get_user(word, u++);
  795. if (rc)
  796. break;
  797. rc = __poke_user(target, pos, word);
  798. count -= sizeof(*u);
  799. pos += sizeof(*u);
  800. }
  801. }
  802. if (rc == 0 && target == current)
  803. restore_access_regs(target->thread.acrs);
  804. return rc;
  805. }
/*
 * Regset .get for the floating point registers.  For the current task
 * the saved state in thread.fp_regs may be stale, so refresh it from
 * the hardware registers before copying it out.
 */
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current) {
		save_fp_ctl(&target->thread.fp_regs.fpc);
		save_fp_regs(target->thread.fp_regs.fprs);
	}
	/* Copy the whole fp_regs area (fpc + fprs) to the caller. */
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}
/*
 * Regset .set for the floating point registers.  The fpc word must be
 * validated before it is accepted; the fprs are copied in unchecked.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	/* Make thread.fp_regs current before merging in new data. */
	if (target == current) {
		save_fp_ctl(&target->thread.fp_regs.fpc);
		save_fp_regs(target->thread.fp_regs.fprs);
	}

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		/*
		 * ufpc[0] is the fpc word, seeded with the current value in
		 * case only part of it is written; ufpc[1] is the pad word
		 * that must remain zero.
		 */
		u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fp_regs.fpc = ufpc[0];
	}

	/* Any remaining data goes straight into the fprs array. */
	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	/* For the running task, load the new state into the hardware. */
	if (rc == 0 && target == current) {
		restore_fp_ctl(&target->thread.fp_regs.fpc);
		restore_fp_regs(target->thread.fp_regs.fprs);
	}
	return rc;
}
  848. #ifdef CONFIG_64BIT
  849. static int s390_last_break_get(struct task_struct *target,
  850. const struct user_regset *regset,
  851. unsigned int pos, unsigned int count,
  852. void *kbuf, void __user *ubuf)
  853. {
  854. if (count > 0) {
  855. if (kbuf) {
  856. unsigned long *k = kbuf;
  857. *k = task_thread_info(target)->last_break;
  858. } else {
  859. unsigned long __user *u = ubuf;
  860. if (__put_user(task_thread_info(target)->last_break, u))
  861. return -EFAULT;
  862. }
  863. }
  864. return 0;
  865. }
/*
 * Regset .set for NT_S390_LAST_BREAK: writes are accepted but silently
 * ignored -- presumably the no-op setter only exists so the regset is
 * not rejected as read-only by generic code (TODO confirm).
 */
static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}
/*
 * Regset .get for NT_S390_TDB: copies out the 256-byte transaction
 * diagnostic block saved in thread.trap_tdb.
 */
static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	/*
	 * Bit 0x200 of int_code flags that a TDB was stored for the last
	 * trap; without it the saved block is not valid, so report
	 * -ENODATA instead of stale bytes.
	 */
	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	/* The TDB is a fixed 256-byte block. */
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}
/*
 * Regset .set for NT_S390_TDB: the diagnostic block is produced by the
 * hardware on a trap and cannot be written back, so writes are accepted
 * and discarded.
 */
static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}
  892. #endif
  893. static int s390_system_call_get(struct task_struct *target,
  894. const struct user_regset *regset,
  895. unsigned int pos, unsigned int count,
  896. void *kbuf, void __user *ubuf)
  897. {
  898. unsigned int *data = &task_thread_info(target)->system_call;
  899. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  900. data, 0, sizeof(unsigned int));
  901. }
  902. static int s390_system_call_set(struct task_struct *target,
  903. const struct user_regset *regset,
  904. unsigned int pos, unsigned int count,
  905. const void *kbuf, const void __user *ubuf)
  906. {
  907. unsigned int *data = &task_thread_info(target)->system_call;
  908. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  909. data, 0, sizeof(unsigned int));
  910. }
/*
 * Native regsets exported through task_user_regset_view().  Indexed by
 * the REGSET_* enum; core_note_type selects the ELF note type emitted
 * for each set in core dumps.
 */
static const struct user_regset s390_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	/* The TDB is one opaque 256-byte record, hence n=1, size=256. */
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
};
/* Regset view for native (64-bit) tasks; used by core dumps and ptrace. */
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
  961. #ifdef CONFIG_COMPAT
  962. static int s390_compat_regs_get(struct task_struct *target,
  963. const struct user_regset *regset,
  964. unsigned int pos, unsigned int count,
  965. void *kbuf, void __user *ubuf)
  966. {
  967. if (target == current)
  968. save_access_regs(target->thread.acrs);
  969. if (kbuf) {
  970. compat_ulong_t *k = kbuf;
  971. while (count > 0) {
  972. *k++ = __peek_user_compat(target, pos);
  973. count -= sizeof(*k);
  974. pos += sizeof(*k);
  975. }
  976. } else {
  977. compat_ulong_t __user *u = ubuf;
  978. while (count > 0) {
  979. if (__put_user(__peek_user_compat(target, pos), u++))
  980. return -EFAULT;
  981. count -= sizeof(*u);
  982. pos += sizeof(*u);
  983. }
  984. }
  985. return 0;
  986. }
/*
 * Regset .set for the 31-bit view of the general registers: writes the
 * area one compat word at a time through __poke_user_compat(), stopping
 * at the first error.
 */
static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	/* For the running task, flush the access registers first. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	/* Reload access registers so new values take effect immediately. */
	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);
	return rc;
}
/*
 * Regset .get for NT_S390_HIGH_GPRS: reads the upper 32-bit halves of
 * the 64-bit general registers that a 31-bit task cannot see in its
 * normal register set.
 */
static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	/*
	 * Each gprs[] slot is two compat words; gprs_high points at the
	 * half holding the high word and is stepped by 2 to reach the
	 * same half of the next register.
	 */
	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}
  1044. static int s390_compat_regs_high_set(struct task_struct *target,
  1045. const struct user_regset *regset,
  1046. unsigned int pos, unsigned int count,
  1047. const void *kbuf, const void __user *ubuf)
  1048. {
  1049. compat_ulong_t *gprs_high;
  1050. int rc = 0;
  1051. gprs_high = (compat_ulong_t *)
  1052. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  1053. if (kbuf) {
  1054. const compat_ulong_t *k = kbuf;
  1055. while (count > 0) {
  1056. *gprs_high = *k++;
  1057. *gprs_high += 2;
  1058. count -= sizeof(*k);
  1059. }
  1060. } else {
  1061. const compat_ulong_t __user *u = ubuf;
  1062. while (count > 0 && !rc) {
  1063. unsigned long word;
  1064. rc = __get_user(word, u++);
  1065. if (rc)
  1066. break;
  1067. *gprs_high = word;
  1068. *gprs_high += 2;
  1069. count -= sizeof(*u);
  1070. }
  1071. }
  1072. return rc;
  1073. }
/*
 * Regset .get for the compat NT_S390_LAST_BREAK note: reports the
 * task's last breaking-event address truncated to a compat word.
 */
static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		/* Truncates the 64-bit last_break to 32 bits. */
		last_break = task_thread_info(target)->last_break;
		if (kbuf) {
			/*
			 * NOTE(review): the destination is written as a full
			 * unsigned long even though the value is compat-sized;
			 * this matches the compat regset's .size of
			 * sizeof(long) -- confirm against the regset table.
			 */
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}
/*
 * Regset .set for the compat NT_S390_LAST_BREAK note: writes are
 * accepted but silently ignored, mirroring the native no-op setter.
 */
static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
/*
 * Regsets exported for 31-bit (compat) tasks.  FP, TDB and system-call
 * handlers are shared with the native view; the general registers use
 * the compat accessors, and REGSET_GENERAL_EXTENDED adds the high
 * halves of the 64-bit gprs.
 */
static const struct user_regset s390_compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	/* The TDB is one opaque 256-byte record, hence n=1, size=256. */
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};
/* Regset view for 31-bit compat tasks; used by core dumps and ptrace. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
  1156. #endif
/*
 * Select the regset view matching the task's personality: the compat
 * (31-bit) view for TIF_31BIT tasks, otherwise the native view.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
/* Register names understood by regs_query_register_offset()/_name(). */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
  1169. unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
  1170. {
  1171. if (offset >= NUM_GPRS)
  1172. return 0;
  1173. return regs->gprs[offset];
  1174. }
  1175. int regs_query_register_offset(const char *name)
  1176. {
  1177. unsigned long offset;
  1178. if (!name || *name != 'r')
  1179. return -EINVAL;
  1180. if (kstrtoul(name + 1, 10, &offset))
  1181. return -EINVAL;
  1182. if (offset >= NUM_GPRS)
  1183. return -EINVAL;
  1184. return offset;
  1185. }
  1186. const char *regs_query_register_name(unsigned int offset)
  1187. {
  1188. if (offset >= NUM_GPRS)
  1189. return NULL;
  1190. return gpr_names[offset];
  1191. }
  1192. static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
  1193. {
  1194. unsigned long ksp = kernel_stack_pointer(regs);
  1195. return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
  1196. }
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	/* Never dereference an address outside the task's own stack. */
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}