/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

#ifdef CONFIG_64BIT
	/* Take care of the enable/disable of transactional execution
	 * and of the vector extension. */
	if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
		unsigned long cr, cr_new;

		__ctl_store(cr, 0, 0);
		cr_new = cr;
		if (MACHINE_HAS_TE) {
			/* Set or clear transaction execution TXC bit 8. */
			cr_new |= (1UL << 55);
			if (task->thread.per_flags & PER_FLAG_NO_TE)
				cr_new &= ~(1UL << 55);
		}
		if (MACHINE_HAS_VX) {
			/* Enable/disable of vector extension */
			cr_new &= ~(1UL << 17);
			if (task->thread.vxrs)
				cr_new |= (1UL << 17);
		}
		if (cr_new != cr)
			__ctl_load(cr_new, 0, 0);
		if (MACHINE_HAS_TE) {
			/* Set/clear transaction execution TDC bits 62/63. */
			__ctl_store(cr, 2, 2);
			cr_new = cr & ~3UL;
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
				if (task->thread.per_flags &
				    PER_FLAG_TE_ABORT_RAND_TEND)
					cr_new |= 1UL;
				else
					cr_new |= 2UL;
			}
			if (cr_new != cr)
				__ctl_load(cr_new, 2, 2);
		}
	}
#endif
	/* Copy user specified PER registers. */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* Merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
#endif
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
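
/*
 * Note on the shift values above: s390 numbers control-register bits
 * from the most-significant bit down, so CR0 bit 8 (the TXC control)
 * corresponds to 1UL << (63 - 8) == 1UL << 55 and CR0 bit 46 (the
 * vector enablement control) to 1UL << 17. The TDC occupies the two
 * least-significant bits of CR2, hence the ~3UL mask.
 */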
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}

#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif
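
/*
 * A minimal sketch of what __ADDR_MASK enforces (assuming a 64-bit
 * kernel): peek_user()/poke_user() below reject any offset into
 * struct user that is not 8-byte aligned, e.g. addr == 4 fails with
 * -EIO while addr == 8 is accepted; the access registers are the one
 * exception and only need 4-byte alignment.
 */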
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fp_regs.fpc;
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fp_regs
		 * or the child->thread.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
#ifdef CONFIG_64BIT
		if (child->thread.vxrs)
			tmp = *(addr_t *)
			       ((addr_t) child->thread.vxrs + 2*offset);
		else
#endif
			tmp = *(addr_t *)
			       ((addr_t) &child->thread.fp_regs.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
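
/*
 * Usage sketch (hypothetical tracer code, not part of this file): via
 * the raw system call, s390 PTRACE_PEEKUSR stores the result through
 * the data pointer rather than returning it, so a caller would do e.g.
 *
 *	long val;
 *	ptrace(PTRACE_PEEKUSR, pid,
 *	       (void *) offsetof(struct user, regs.orig_gpr2), &val);
 *
 * after which val holds the traced task's orig_gpr2.
 */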
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fp_regs
		 * or the child->thread.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
#ifdef CONFIG_64BIT
		if (child->thread.vxrs)
			*(addr_t *)((addr_t)
				child->thread.vxrs + 2*offset) = data;
		else
#endif
			*(addr_t *)((addr_t)
				&child->thread.fp_regs.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;

	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned long __user *) data);
		return 0;

	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;

	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;

	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;

	default:
		/* Removing high order bit from addr (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		return ptrace_request(child, request, addr, data);
	}
}
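
/*
 * Usage sketch for the area requests (hypothetical tracer code, not
 * part of this file): addr points at a ptrace_area descriptor and the
 * kernel then copies len bytes word by word between the USER area and
 * the tracer's buffer, e.g.
 *
 *	unsigned long buf[4];
 *	ptrace_area parea = {
 *		.len          = sizeof(buf),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (addr_t) buf,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, 0);
 */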
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */
/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fp_regs.fpc;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fp_regs
		 * or the child->thread.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
#ifdef CONFIG_64BIT
		if (child->thread.vxrs)
			tmp = *(__u32 *)
			       ((addr_t) child->thread.vxrs + 2*offset);
		else
#endif
			tmp = *(__u32 *)
			       ((addr_t) &child->thread.fp_regs.fprs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}
static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fp_regs.fpc = data;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fp_regs
		 * or the child->thread.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
#ifdef CONFIG_64BIT
		if (child->thread.vxrs)
			*(__u32 *)((addr_t)
				child->thread.vxrs + 2*offset) = tmp;
		else
#endif
			*(__u32 *)((addr_t)
				&child->thread.fp_regs.fprs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;

	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing()) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
	}

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_pt_regs_flag(regs, PIF_SYSCALL);
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	audit_syscall_entry(regs->gprs[2], regs->orig_gpr2,
			    regs->gprs[3], regs->gprs[4],
			    regs->gprs[5]);
out:
	return ret ?: regs->gprs[2];
}
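
/*
 * Note on the return value above (an observation about this code, not
 * a separate spec): entry.S treats the value returned here as the
 * system call number to dispatch. When ret is -1 the caller sees -1
 * and skips the system call; otherwise it gets the (possibly modified)
 * number from gprs[2].
 */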
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current) {
		save_fp_ctl(&target->thread.fp_regs.fpc);
		save_fp_regs(target->thread.fp_regs.fprs);
	}
#ifdef CONFIG_64BIT
	else if (target->thread.vxrs) {
		int i;

		for (i = 0; i < __NUM_VXRS_LOW; i++)
			target->thread.fp_regs.fprs[i] =
				*(freg_t *)(target->thread.vxrs + i);
	}
#endif
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current) {
		save_fp_ctl(&target->thread.fp_regs.fpc);
		save_fp_regs(target->thread.fp_regs.fprs);
	}

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fp_regs.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	if (rc == 0) {
		if (target == current) {
			restore_fp_ctl(&target->thread.fp_regs.fpc);
			restore_fp_regs(target->thread.fp_regs.fprs);
		}
#ifdef CONFIG_64BIT
		else if (target->thread.vxrs) {
			int i;

			for (i = 0; i < __NUM_VXRS_LOW; i++)
				*(freg_t *)(target->thread.vxrs + i) =
					target->thread.fp_regs.fprs[i];
		}
#endif
	}

	return rc;
}
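
/*
 * A note on the vxrs handling above (derived from this code, not from
 * a separate spec): when the vector facility is in use, fprs 0-15 are
 * architecturally the leftmost 64 bits of vector registers V0-V15, so
 * the fp regset reads and writes go through the first __u64 of each
 * __vector128 entry in thread.vxrs instead of thread.fp_regs.
 */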
#ifdef CONFIG_64BIT

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = task_thread_info(target)->last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(task_thread_info(target)->last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     void *kbuf, void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target->thread.vxrs) {
		if (target == current)
			save_vx_regs(target->thread.vxrs);
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
	} else
		memset(vxrs, 0, sizeof(vxrs));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (!target->thread.vxrs) {
		rc = alloc_vector_registers(target);
		if (rc)
			return rc;
	} else if (target == current)
		save_vx_regs(target->thread.vxrs);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0) {
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
		if (target == current)
			restore_vx_regs(target->thread.vxrs);
	}

	return rc;
}
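
/*
 * NT_S390_VXRS_LOW carries only the rightmost 64 bits of V0-V15 (the
 * leftmost halves are visible through the fp regset as fprs 0-15),
 * which is why the loops above address the second __u64 of each
 * __vector128 entry.
 */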
static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      void *kbuf, void __user *ubuf)
{
	__vector128 vxrs[__NUM_VXRS_HIGH];

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target->thread.vxrs) {
		if (target == current)
			save_vx_regs(target->thread.vxrs);
		memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
		       sizeof(vxrs));
	} else
		memset(vxrs, 0, sizeof(vxrs));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	int rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (!target->thread.vxrs) {
		rc = alloc_vector_registers(target);
		if (rc)
			return rc;
	} else if (target == current)
		save_vx_regs(target->thread.vxrs);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
	if (rc == 0 && target == current)
		restore_vx_regs(target->thread.vxrs);

	return rc;
}

#endif
static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	unsigned int *data = &task_thread_info(target)->system_call;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(unsigned int));
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &task_thread_info(target)->system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}
static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
#ifdef CONFIG_64BIT
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
#endif
};

static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
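
/*
 * Usage sketch (hypothetical tracer code, not part of this file): the
 * regsets above are reached from user space via PTRACE_GETREGSET /
 * PTRACE_SETREGSET with the matching NT_* note type, e.g.
 *
 *	unsigned int sysnr;
 *	struct iovec iov = { .iov_base = &sysnr, .iov_len = sizeof(sysnr) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_S390_SYSTEM_CALL, &iov);
 *
 * They also define the note sections written to ELF core dumps.
 */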
#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}
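
/*
 * Layout note (derived from the code above): on big-endian s390 the
 * upper 32 bits of each 64-bit gpr come first in memory, so gprs_high
 * initially points at the high word of a register and must advance by
 * two compat_ulong_t slots, i.e. one full 64-bit gpr, per iteration.
 */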
static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = task_thread_info(target)->last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}

static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (kstrtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}

const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}
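
/*
 * Usage sketch (an illustration, not a caller in this file): helpers
 * like these back kprobes-based event tracing, which resolves strings
 * like "r2" to a gpr index and reads stack-spilled values, e.g.
 *
 *	int off = regs_query_register_offset("r2");
 *	unsigned long arg0 = (off >= 0) ? regs_get_register(regs, off) : 0;
 *	unsigned long slot5 = regs_get_kernel_stack_nth(regs, 5);
 */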