/*
 * Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

void update_cr_regs(struct task_struct *task)
{
        struct pt_regs *regs = task_pt_regs(task);
        struct thread_struct *thread = &task->thread;
        struct per_regs old, new;

#ifdef CONFIG_64BIT
        /* Take care of the enable/disable of transactional execution. */
        if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
                unsigned long cr, cr_new;

                __ctl_store(cr, 0, 0);
                cr_new = cr;
                if (MACHINE_HAS_TE) {
                        /* Set or clear transaction execution TXC bit 8. */
                        cr_new |= (1UL << 55);
                        if (task->thread.per_flags & PER_FLAG_NO_TE)
                                cr_new &= ~(1UL << 55);
                }
                if (MACHINE_HAS_VX) {
                        /* Enable/disable of vector extension */
                        cr_new &= ~(1UL << 17);
                        if (task->thread.vxrs)
                                cr_new |= (1UL << 17);
                }
                if (cr_new != cr)
                        __ctl_load(cr_new, 0, 0);
                if (MACHINE_HAS_TE) {
                        /* Set/clear transaction execution TDC bits 62/63. */
                        __ctl_store(cr, 2, 2);
                        cr_new = cr & ~3UL;
                        if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
                                if (task->thread.per_flags &
                                    PER_FLAG_TE_ABORT_RAND_TEND)
                                        cr_new |= 1UL;
                                else
                                        cr_new |= 2UL;
                        }
                        if (cr_new != cr)
                                __ctl_load(cr_new, 2, 2);
                }
        }
#endif
        /* Copy user specified PER registers */
        new.control = thread->per_user.control;
        new.start = thread->per_user.start;
        new.end = thread->per_user.end;

        /* merge TIF_SINGLE_STEP into user specified PER registers. */
        if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
            test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
                if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
                        new.control |= PER_EVENT_BRANCH;
                else
                        new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
                new.control |= PER_CONTROL_SUSPENSION;
                new.control |= PER_EVENT_TRANSACTION_END;
#endif
                if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
                        new.control |= PER_EVENT_IFETCH;
                new.start = 0;
                new.end = PSW_ADDR_INSN;
        }

        /* Take care of the PER enablement bit in the PSW. */
        if (!(new.control & PER_EVENT_MASK)) {
                regs->psw.mask &= ~PSW_MASK_PER;
                return;
        }
        regs->psw.mask |= PSW_MASK_PER;
        __ctl_store(old, 9, 11);
        if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
                __ctl_load(new, 9, 11);
}
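
/*
 * Illustrative sketch (not part of this file): the TE related per_flags
 * consumed by update_cr_regs() are driven by the s390 specific requests
 * handled in arch_ptrace() below, e.g. from a tracer:
 *
 *	ptrace(PTRACE_DISABLE_TE, pid, 0, 0);	   // sets PER_FLAG_NO_TE
 *	ptrace(PTRACE_TE_ABORT_RAND, pid, 0, 1UL); // random aborts at TEND
 */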

void user_enable_single_step(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
        set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
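
/*
 * Illustrative sketch (not part of this file): these helpers are called
 * by the generic ptrace code, not directly by user space. A tracer
 * reaches user_enable_single_step() through PTRACE_SINGLESTEP:
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);	// step one instruction
 *	waitpid(pid, &status, 0);		// child stops with SIGTRAP
 */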

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
        memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
        memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
        clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
        task->thread.per_flags = 0;
}
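
/*
 * Illustrative sketch (not part of this file): ptrace_disable() runs via
 * kernel/ptrace.c when the tracer detaches, e.g.
 *
 *	ptrace(PTRACE_DETACH, pid, 0, 0);
 */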

#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif

static inline unsigned long __peek_user_per(struct task_struct *child,
                                            addr_t addr)
{
        struct per_struct_kernel *dummy = NULL;

        if (addr == (addr_t) &dummy->cr9)
                /* Control bits of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        PER_EVENT_IFETCH : child->thread.per_user.control;
        else if (addr == (addr_t) &dummy->cr10)
                /* Start address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        0 : child->thread.per_user.start;
        else if (addr == (addr_t) &dummy->cr11)
                /* End address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        PSW_ADDR_INSN : child->thread.per_user.end;
        else if (addr == (addr_t) &dummy->bits)
                /* Single-step bit. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        (1UL << (BITS_PER_LONG - 1)) : 0;
        else if (addr == (addr_t) &dummy->starting_addr)
                /* Start address of the user specified per set. */
                return child->thread.per_user.start;
        else if (addr == (addr_t) &dummy->ending_addr)
                /* End address of the user specified per set. */
                return child->thread.per_user.end;
        else if (addr == (addr_t) &dummy->perc_atmid)
                /* PER code, ATMID and AI of the last PER trap */
                return (unsigned long)
                        child->thread.per_event.cause << (BITS_PER_LONG - 16);
        else if (addr == (addr_t) &dummy->address)
                /* Address of the last PER trap */
                return child->thread.per_event.address;
        else if (addr == (addr_t) &dummy->access_id)
                /* Access id of the last PER trap */
                return (unsigned long)
                        child->thread.per_event.paid << (BITS_PER_LONG - 8);
        return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
        struct user *dummy = NULL;
        addr_t offset, tmp;

        if (addr < (addr_t) &dummy->regs.acrs) {
                /*
                 * psw and gprs are stored on the stack
                 */
                tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
                if (addr == (addr_t) &dummy->regs.psw.mask) {
                        /* Return a clean psw mask. */
                        tmp &= PSW_MASK_USER | PSW_MASK_RI;
                        tmp |= PSW_USER_BITS;
                }
        } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
                /*
                 * Very special case: old & broken 64 bit gdb reading
                 * from acrs[15]. Result is a 64 bit value. Read the
                 * 32 bit acrs[15] value and shift it by 32. Sick...
                 */
                if (addr == (addr_t) &dummy->regs.acrs[15])
                        tmp = ((unsigned long) child->thread.acrs[15]) << 32;
                else
#endif
                tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
                /*
                 * prevent reads of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                tmp = 0;
        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.fp_regs;
                tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
                if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
                        tmp <<= BITS_PER_LONG - 32;
        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy->regs.per_info;
                tmp = __peek_user_per(child, addr);
        } else
                tmp = 0;

        return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
        addr_t tmp, mask;

        /*
         * Stupid gdb peeks/pokes the access registers in 64 bit with
         * an alignment of 4. Programmers from hell...
         */
        mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
                mask = 3;
#endif
        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
                return -EIO;

        tmp = __peek_user(child, addr);
        return put_user(tmp, (addr_t __user *) data);
}
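
/*
 * Illustrative sketch (not part of this file): from user space the
 * glibc ptrace() wrapper (PTRACE_PEEKUSER there, PTRACE_PEEKUSR in the
 * kernel headers) returns the peeked word, e.g. the clean PSW mask at
 * offset 0 of struct user:
 *
 *	errno = 0;
 *	long mask = ptrace(PTRACE_PEEKUSER, pid,
 *			   offsetof(struct user, regs.psw.mask), 0);
 *	if (mask == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 */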

static inline void __poke_user_per(struct task_struct *child,
                                   addr_t addr, addr_t data)
{
        struct per_struct_kernel *dummy = NULL;

        /*
         * There are only three fields in the per_info struct that the
         * debugger user can write to.
         * 1) cr9: the debugger wants to set a new PER event mask
         * 2) starting_addr: the debugger wants to set a new starting
         *    address to use with the PER event mask.
         * 3) ending_addr: the debugger wants to set a new ending
         *    address to use with the PER event mask.
         * The user specified PER event mask and the start and end
         * addresses are used only if single stepping is not in effect.
         * Writes to any other field in per_info are ignored.
         */
        if (addr == (addr_t) &dummy->cr9)
                /* PER event mask of the user specified per set. */
                child->thread.per_user.control =
                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
        else if (addr == (addr_t) &dummy->starting_addr)
                /* Starting address of the user specified per set. */
                child->thread.per_user.start = data;
        else if (addr == (addr_t) &dummy->ending_addr)
                /* Ending address of the user specified per set. */
                child->thread.per_user.end = data;
}
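
/*
 * Illustrative sketch (not part of this file): a debugger arms a PER
 * hardware watch by poking exactly those three fields. The per_struct
 * field names and the PER_EVENT_STORE constant below are assumptions
 * for illustration; see the uapi asm/ptrace.h for the real layout.
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, regs.per_info.starting_addr), start);
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, regs.per_info.ending_addr), end);
 *	// cr9 is the first word of per_info: the PER event mask
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, regs.per_info), PER_EVENT_STORE);
 */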

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
        struct user *dummy = NULL;
        addr_t offset;

        if (addr < (addr_t) &dummy->regs.acrs) {
                /*
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy->regs.psw.mask) {
                        unsigned long mask = PSW_MASK_USER;

                        mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
                        if ((data ^ PSW_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
                        if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
                                /* Invalid address-space-control bits */
                                return -EINVAL;
                        if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
                                /* Invalid addressing mode bits */
                                return -EINVAL;
                }
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
                /*
                 * Very special case: old & broken 64 bit gdb writing
                 * to acrs[15] with a 64 bit value. Ignore the lower
                 * half of the value and write the upper 32 bit to
                 * acrs[15]. Sick...
                 */
                if (addr == (addr_t) &dummy->regs.acrs[15])
                        child->thread.acrs[15] = (unsigned int) (data >> 32);
                else
#endif
                *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                task_pt_regs(child)->orig_gpr2 = data;
        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
                /*
                 * prevent writes of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                return 0;
        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are stored in the thread structure
                 */
                if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
                        if ((unsigned int) data != 0 ||
                            test_fp_ctl(data >> (BITS_PER_LONG - 32)))
                                return -EINVAL;
                offset = addr - (addr_t) &dummy->regs.fp_regs;
                *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy->regs.per_info;
                __poke_user_per(child, addr, data);
        }

        return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
        addr_t mask;

        /*
         * Stupid gdb peeks/pokes the access registers in 64 bit with
         * an alignment of 4. Programmers from hell indeed...
         */
        mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
                mask = 3;
#endif
        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
                return -EIO;

        return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        ptrace_area parea;
        int copied, ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                /* read the word at location addr in the USER area. */
                return peek_user(child, addr, data);
        case PTRACE_POKEUSR:
                /* write the word at location addr in the USER area */
                return poke_user(child, addr, data);
        case PTRACE_PEEKUSR_AREA:
        case PTRACE_POKEUSR_AREA:
                if (copy_from_user(&parea, (void __force __user *) addr,
                                   sizeof(parea)))
                        return -EFAULT;
                addr = parea.kernel_addr;
                data = parea.process_addr;
                copied = 0;
                while (copied < parea.len) {
                        if (request == PTRACE_PEEKUSR_AREA)
                                ret = peek_user(child, addr, data);
                        else {
                                addr_t utmp;
                                if (get_user(utmp,
                                             (addr_t __force __user *) data))
                                        return -EFAULT;
                                ret = poke_user(child, addr, utmp);
                        }
                        if (ret)
                                return ret;
                        addr += sizeof(unsigned long);
                        data += sizeof(unsigned long);
                        copied += sizeof(unsigned long);
                }
                return 0;
        case PTRACE_GET_LAST_BREAK:
                put_user(task_thread_info(child)->last_break,
                         (unsigned long __user *) data);
                return 0;
        case PTRACE_ENABLE_TE:
                if (!MACHINE_HAS_TE)
                        return -EIO;
                child->thread.per_flags &= ~PER_FLAG_NO_TE;
                return 0;
        case PTRACE_DISABLE_TE:
                if (!MACHINE_HAS_TE)
                        return -EIO;
                child->thread.per_flags |= PER_FLAG_NO_TE;
                child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
                return 0;
        case PTRACE_TE_ABORT_RAND:
                if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
                        return -EIO;
                switch (data) {
                case 0UL:
                        child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
                        break;
                case 1UL:
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
                        break;
                case 2UL:
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
                        child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
                        break;
                default:
                        return -EINVAL;
                }
                return 0;
        default:
                /* Removing high order bit from addr (only for 31 bit). */
                addr &= PSW_ADDR_INSN;
                return ptrace_request(child, request, addr, data);
        }
}
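
/*
 * Illustrative sketch (not part of this file): PTRACE_PEEKUSR_AREA
 * transfers a whole range of the user area in one call. The tracer
 * passes a ptrace_area descriptor (uapi asm/ptrace.h) as the addr
 * argument, e.g. to fetch all 16 general purpose registers at once:
 *
 *	unsigned long gprs[16];
 *	ptrace_area parea = {
 *		.len          = sizeof(gprs),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long) gprs,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, 0);
 */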

#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly part is PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
                                           addr_t addr)
{
        struct compat_per_struct_kernel *dummy32 = NULL;

        if (addr == (addr_t) &dummy32->cr9)
                /* Control bits of the active per set. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        PER_EVENT_IFETCH : child->thread.per_user.control;
        else if (addr == (addr_t) &dummy32->cr10)
                /* Start address of the active per set. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        0 : child->thread.per_user.start;
        else if (addr == (addr_t) &dummy32->cr11)
                /* End address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        PSW32_ADDR_INSN : child->thread.per_user.end;
        else if (addr == (addr_t) &dummy32->bits)
                /* Single-step bit. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        0x80000000 : 0;
        else if (addr == (addr_t) &dummy32->starting_addr)
                /* Start address of the user specified per set. */
                return (__u32) child->thread.per_user.start;
        else if (addr == (addr_t) &dummy32->ending_addr)
                /* End address of the user specified per set. */
                return (__u32) child->thread.per_user.end;
        else if (addr == (addr_t) &dummy32->perc_atmid)
                /* PER code, ATMID and AI of the last PER trap */
                return (__u32) child->thread.per_event.cause << 16;
        else if (addr == (addr_t) &dummy32->address)
                /* Address of the last PER trap */
                return (__u32) child->thread.per_event.address;
        else if (addr == (addr_t) &dummy32->access_id)
                /* Access id of the last PER trap */
                return (__u32) child->thread.per_event.paid << 24;
        return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
        struct compat_user *dummy32 = NULL;
        addr_t offset;
        __u32 tmp;

        if (addr < (addr_t) &dummy32->regs.acrs) {
                struct pt_regs *regs = task_pt_regs(child);
                /*
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Fake a 31 bit psw mask. */
                        tmp = (__u32)(regs->psw.mask >> 32);
                        tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
                        tmp |= PSW32_USER_BITS;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Fake a 31 bit psw address. */
                        tmp = (__u32) regs->psw.addr |
                                (__u32)(regs->psw.mask & PSW_MASK_BA);
                } else {
                        /* gpr 0-15 */
                        tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.acrs;
                tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
                /*
                 * prevent reads of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                tmp = 0;
        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.fp_regs;
                tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy32->regs.per_info;
                tmp = __peek_user_per_compat(child, addr);
        } else
                tmp = 0;

        return tmp;
}

static int peek_user_compat(struct task_struct *child,
                            addr_t addr, addr_t data)
{
        __u32 tmp;

        if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
                return -EIO;

        tmp = __peek_user_compat(child, addr);
        return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
                                          addr_t addr, __u32 data)
{
        struct compat_per_struct_kernel *dummy32 = NULL;

        if (addr == (addr_t) &dummy32->cr9)
                /* PER event mask of the user specified per set. */
                child->thread.per_user.control =
                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
        else if (addr == (addr_t) &dummy32->starting_addr)
                /* Starting address of the user specified per set. */
                child->thread.per_user.start = data;
        else if (addr == (addr_t) &dummy32->ending_addr)
                /* Ending address of the user specified per set. */
                child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
                              addr_t addr, addr_t data)
{
        struct compat_user *dummy32 = NULL;
        __u32 tmp = (__u32) data;
        addr_t offset;

        if (addr < (addr_t) &dummy32->regs.acrs) {
                struct pt_regs *regs = task_pt_regs(child);
                /*
                 * psw, gprs, acrs and orig_gpr2 are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        __u32 mask = PSW32_MASK_USER;

                        mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
                        if ((tmp ^ PSW32_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
                        if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
                                /* Invalid address-space-control bits */
                                return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
                                (__u64)(tmp & mask) << 32;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Build a 64 bit psw address from 31 bit address. */
                        regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
                        /* Transfer 31 bit amode bit to psw mask. */
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
                                (__u64)(tmp & PSW32_ADDR_AMODE);
                } else {
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.acrs;
                *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
                /*
                 * prevent writes of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                return 0;
        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are stored in the thread structure
                 */
                if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
                    test_fp_ctl(tmp))
                        return -EINVAL;
                offset = addr - (addr_t) &dummy32->regs.fp_regs;
                *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy32->regs.per_info;
                __poke_user_per_compat(child, addr, data);
        }

        return 0;
}

static int poke_user_compat(struct task_struct *child,
                            addr_t addr, addr_t data)
{
        if (!is_compat_task() || (addr & 3) ||
            addr > sizeof(struct compat_user) - 3)
                return -EIO;

        return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        compat_ptrace_area parea;
        int copied, ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                /* read the word at location addr in the USER area. */
                return peek_user_compat(child, addr, data);
        case PTRACE_POKEUSR:
                /* write the word at location addr in the USER area */
                return poke_user_compat(child, addr, data);
        case PTRACE_PEEKUSR_AREA:
        case PTRACE_POKEUSR_AREA:
                if (copy_from_user(&parea, (void __force __user *) addr,
                                   sizeof(parea)))
                        return -EFAULT;
                addr = parea.kernel_addr;
                data = parea.process_addr;
                copied = 0;
                while (copied < parea.len) {
                        if (request == PTRACE_PEEKUSR_AREA)
                                ret = peek_user_compat(child, addr, data);
                        else {
                                __u32 utmp;
                                if (get_user(utmp,
                                             (__u32 __force __user *) data))
                                        return -EFAULT;
                                ret = poke_user_compat(child, addr, utmp);
                        }
                        if (ret)
                                return ret;
                        addr += sizeof(unsigned int);
                        data += sizeof(unsigned int);
                        copied += sizeof(unsigned int);
                }
                return 0;
        case PTRACE_GET_LAST_BREAK:
                put_user(task_thread_info(child)->last_break,
                         (unsigned int __user *) data);
                return 0;
        }
        return compat_ptrace_request(child, request, addr, data);
}
#endif

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
        long ret = 0;

        /* Do the secure computing check first. */
        if (secure_computing()) {
                /* seccomp failures shouldn't expose any additional code. */
                ret = -1;
                goto out;
        }

        /*
         * The sysc_tracesys code in entry.S stored the system
         * call number to gprs[2].
         */
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            (tracehook_report_syscall_entry(regs) ||
             regs->gprs[2] >= NR_syscalls)) {
                /*
                 * Tracing decided this syscall should not happen or the
                 * debugger stored an invalid system call number. Skip
                 * the system call and the system call restart handling.
                 */
                clear_pt_regs_flag(regs, PIF_SYSCALL);
                ret = -1;
        }

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->gprs[2]);

        audit_syscall_entry(regs->gprs[2], regs->orig_gpr2,
                            regs->gprs[3], regs->gprs[4],
                            regs->gprs[5]);
out:
        return ret ?: regs->gprs[2];
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
        audit_syscall_exit(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->gprs[2]);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);
}
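
/*
 * Illustrative sketch (not part of this file): the enter/exit hooks
 * above fire once per stop of a PTRACE_SYSCALL tracer loop:
 *
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);	// run to next syscall
 *		waitpid(pid, &status, 0);		// entry stop
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);
 *		waitpid(pid, &status, 0);		// exit stop
 *	}
 */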

/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         void *kbuf, void __user *ubuf)
{
        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                unsigned long *k = kbuf;
                while (count > 0) {
                        *k++ = __peek_user(target, pos);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                unsigned long __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(__peek_user(target, pos), u++))
                                return -EFAULT;
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }
        return 0;
}

static int s390_regs_set(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         const void *kbuf, const void __user *ubuf)
{
        int rc = 0;

        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                const unsigned long *k = kbuf;
                while (count > 0 && !rc) {
                        rc = __poke_user(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const unsigned long __user *u = ubuf;
                while (count > 0 && !rc) {
                        unsigned long word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        rc = __poke_user(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        if (rc == 0 && target == current)
                restore_access_regs(target->thread.acrs);

        return rc;
}

static int s390_fpregs_get(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, void *kbuf, void __user *ubuf)
{
        if (target == current) {
                save_fp_ctl(&target->thread.fp_regs.fpc);
                save_fp_regs(target->thread.fp_regs.fprs);
        }
#ifdef CONFIG_64BIT
        else if (target->thread.vxrs) {
                int i;

                for (i = 0; i < __NUM_VXRS_LOW; i++)
                        target->thread.fp_regs.fprs[i] =
                                *(freg_t *)(target->thread.vxrs + i);
        }
#endif
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, const void *kbuf,
                           const void __user *ubuf)
{
        int rc = 0;

        if (target == current) {
                save_fp_ctl(&target->thread.fp_regs.fpc);
                save_fp_regs(target->thread.fp_regs.fprs);
        }

        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
                                        0, offsetof(s390_fp_regs, fprs));
                if (rc)
                        return rc;
                if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
                        return -EINVAL;
                target->thread.fp_regs.fpc = ufpc[0];
        }

        if (rc == 0 && count > 0)
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                        target->thread.fp_regs.fprs,
                                        offsetof(s390_fp_regs, fprs), -1);

        if (rc == 0) {
                if (target == current) {
                        restore_fp_ctl(&target->thread.fp_regs.fpc);
                        restore_fp_regs(target->thread.fp_regs.fprs);
                }
#ifdef CONFIG_64BIT
                else if (target->thread.vxrs) {
                        int i;

                        for (i = 0; i < __NUM_VXRS_LOW; i++)
                                *(freg_t *)(target->thread.vxrs + i) =
                                        target->thread.fp_regs.fprs[i];
                }
#endif
        }

        return rc;
}

#ifdef CONFIG_64BIT

static int s390_last_break_get(struct task_struct *target,
                               const struct user_regset *regset,
                               unsigned int pos, unsigned int count,
                               void *kbuf, void __user *ubuf)
{
        if (count > 0) {
                if (kbuf) {
                        unsigned long *k = kbuf;
                        *k = task_thread_info(target)->last_break;
                } else {
                        unsigned long __user *u = ubuf;
                        if (__put_user(task_thread_info(target)->last_break, u))
                                return -EFAULT;
                }
        }
        return 0;
}

static int s390_last_break_set(struct task_struct *target,
                               const struct user_regset *regset,
                               unsigned int pos, unsigned int count,
                               const void *kbuf, const void __user *ubuf)
{
        return 0;
}

static int s390_tdb_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        unsigned char *data;

        if (!(regs->int_code & 0x200))
                return -ENODATA;
        data = target->thread.trap_tdb;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}

static int s390_tdb_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        return 0;
}
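
/*
 * Illustrative sketch (not part of this file): the transaction
 * diagnostic block is only present after a transaction-abort trap, so
 * a tracer must be prepared for ENODATA:
 *
 *	unsigned char tdb[256];
 *	struct iovec iov = { .iov_base = tdb, .iov_len = sizeof(tdb) };
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_S390_TDB, &iov) < 0 &&
 *	    errno == ENODATA)
 *		;	// last trap was not a transaction abort
 */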

static int s390_vxrs_active(struct task_struct *target,
                            const struct user_regset *regset)
{
        return !!target->thread.vxrs;
}

static int s390_vxrs_low_get(struct task_struct *target,
                             const struct user_regset *regset,
                             unsigned int pos, unsigned int count,
                             void *kbuf, void __user *ubuf)
{
        __u64 vxrs[__NUM_VXRS_LOW];
        int i;

        if (target->thread.vxrs) {
                if (target == current)
                        save_vx_regs(target->thread.vxrs);
                for (i = 0; i < __NUM_VXRS_LOW; i++)
                        vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
        } else
                memset(vxrs, 0, sizeof(vxrs));
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_low_set(struct task_struct *target,
                             const struct user_regset *regset,
                             unsigned int pos, unsigned int count,
                             const void *kbuf, const void __user *ubuf)
{
        __u64 vxrs[__NUM_VXRS_LOW];
        int i, rc;

        if (!target->thread.vxrs) {
                rc = alloc_vector_registers(target);
                if (rc)
                        return rc;
        } else if (target == current)
                save_vx_regs(target->thread.vxrs);

        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
        if (rc == 0) {
                for (i = 0; i < __NUM_VXRS_LOW; i++)
                        *((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
                if (target == current)
                        restore_vx_regs(target->thread.vxrs);
        }

        return rc;
}

static int s390_vxrs_high_get(struct task_struct *target,
                              const struct user_regset *regset,
                              unsigned int pos, unsigned int count,
                              void *kbuf, void __user *ubuf)
{
        __vector128 vxrs[__NUM_VXRS_HIGH];

        if (target->thread.vxrs) {
                if (target == current)
                        save_vx_regs(target->thread.vxrs);
                memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
                       sizeof(vxrs));
        } else
                memset(vxrs, 0, sizeof(vxrs));
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_high_set(struct task_struct *target,
                              const struct user_regset *regset,
                              unsigned int pos, unsigned int count,
                              const void *kbuf, const void __user *ubuf)
{
        int rc;

        if (!target->thread.vxrs) {
                rc = alloc_vector_registers(target);
                if (rc)
                        return rc;
        } else if (target == current)
                save_vx_regs(target->thread.vxrs);

        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
        if (rc == 0 && target == current)
                restore_vx_regs(target->thread.vxrs);

        return rc;
}

#endif

static int s390_system_call_get(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                void *kbuf, void __user *ubuf)
{
        unsigned int *data = &task_thread_info(target)->system_call;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   data, 0, sizeof(unsigned int));
}

static int s390_system_call_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        unsigned int *data = &task_thread_info(target)->system_call;
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  data, 0, sizeof(unsigned int));
}

static const struct user_regset s390_regsets[] = {
        {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(s390_regs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_regs_get,
                .set = s390_regs_set,
        },
        {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(s390_fp_regs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_fpregs_get,
                .set = s390_fpregs_set,
        },
        {
                .core_note_type = NT_S390_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(unsigned int),
                .align = sizeof(unsigned int),
                .get = s390_system_call_get,
                .set = s390_system_call_set,
        },
#ifdef CONFIG_64BIT
        {
                .core_note_type = NT_S390_LAST_BREAK,
                .n = 1,
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_last_break_get,
                .set = s390_last_break_set,
        },
        {
                .core_note_type = NT_S390_TDB,
                .n = 1,
                .size = 256,
                .align = 1,
                .get = s390_tdb_get,
                .set = s390_tdb_set,
        },
        {
                .core_note_type = NT_S390_VXRS_LOW,
                .n = __NUM_VXRS_LOW,
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .active = s390_vxrs_active,
                .get = s390_vxrs_low_get,
                .set = s390_vxrs_low_set,
        },
        {
                .core_note_type = NT_S390_VXRS_HIGH,
                .n = __NUM_VXRS_HIGH,
                .size = sizeof(__vector128),
                .align = sizeof(__vector128),
                .active = s390_vxrs_active,
                .get = s390_vxrs_high_get,
                .set = s390_vxrs_high_set,
        },
#endif
};

static const struct user_regset_view user_s390_view = {
        .name = UTS_MACHINE,
        .e_machine = EM_S390,
        .regsets = s390_regsets,
        .n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                void *kbuf, void __user *ubuf)
{
        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *k++ = __peek_user_compat(target, pos);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                compat_ulong_t __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(__peek_user_compat(target, pos), u++))
                                return -EFAULT;
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }
        return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        int rc = 0;

        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                const compat_ulong_t *k = kbuf;
                while (count > 0 && !rc) {
                        rc = __poke_user_compat(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const compat_ulong_t __user *u = ubuf;
                while (count > 0 && !rc) {
                        compat_ulong_t word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        rc = __poke_user_compat(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        if (rc == 0 && target == current)
                restore_access_regs(target->thread.acrs);

        return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
                                     const struct user_regset *regset,
                                     unsigned int pos, unsigned int count,
                                     void *kbuf, void __user *ubuf)
{
        compat_ulong_t *gprs_high;

        gprs_high = (compat_ulong_t *)
                &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
        if (kbuf) {
                compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *k++ = *gprs_high;
                        gprs_high += 2;
                        count -= sizeof(*k);
                }
        } else {
                compat_ulong_t __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(*gprs_high, u++))
                                return -EFAULT;
                        gprs_high += 2;
                        count -= sizeof(*u);
                }
        }
        return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
                                     const struct user_regset *regset,
                                     unsigned int pos, unsigned int count,
                                     const void *kbuf, const void __user *ubuf)
{
        compat_ulong_t *gprs_high;
        int rc = 0;

        gprs_high = (compat_ulong_t *)
                &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
        if (kbuf) {
                const compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *gprs_high = *k++;
                        /* advance the pointer to the next gpr's high word */
                        gprs_high += 2;
                        count -= sizeof(*k);
                }
        } else {
                const compat_ulong_t __user *u = ubuf;
                while (count > 0 && !rc) {
                        unsigned long word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        *gprs_high = word;
                        gprs_high += 2;
                        count -= sizeof(*u);
                }
        }

        return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
                                      const struct user_regset *regset,
                                      unsigned int pos, unsigned int count,
                                      void *kbuf, void __user *ubuf)
{
        compat_ulong_t last_break;

        if (count > 0) {
                last_break = task_thread_info(target)->last_break;
                if (kbuf) {
                        unsigned long *k = kbuf;
                        *k = last_break;
                } else {
                        unsigned long __user *u = ubuf;
                        if (__put_user(last_break, u))
                                return -EFAULT;
                }
        }
        return 0;
}

static int s390_compat_last_break_set(struct task_struct *target,
                                      const struct user_regset *regset,
                                      unsigned int pos, unsigned int count,
                                      const void *kbuf, const void __user *ubuf)
{
        return 0;
}

static const struct user_regset s390_compat_regsets[] = {
        {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_compat_regs_get,
                .set = s390_compat_regs_set,
        },
        {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_fpregs_get,
                .set = s390_fpregs_set,
        },
        {
                .core_note_type = NT_S390_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(compat_uint_t),
                .align = sizeof(compat_uint_t),
                .get = s390_system_call_get,
                .set = s390_system_call_set,
        },
        {
                .core_note_type = NT_S390_LAST_BREAK,
                .n = 1,
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_compat_last_break_get,
                .set = s390_compat_last_break_set,
        },
        {
                .core_note_type = NT_S390_TDB,
                .n = 1,
                .size = 256,
                .align = 1,
                .get = s390_tdb_get,
                .set = s390_tdb_set,
        },
        {
                .core_note_type = NT_S390_VXRS_LOW,
                .n = __NUM_VXRS_LOW,
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .active = s390_vxrs_active,
                .get = s390_vxrs_low_get,
                .set = s390_vxrs_low_set,
        },
        {
                .core_note_type = NT_S390_VXRS_HIGH,
                .n = __NUM_VXRS_HIGH,
                .size = sizeof(__vector128),
                .align = sizeof(__vector128),
                .active = s390_vxrs_active,
                .get = s390_vxrs_high_get,
                .set = s390_vxrs_high_set,
        },
        {
                .core_note_type = NT_S390_HIGH_GPRS,
                .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_compat_regs_high_get,
                .set = s390_compat_regs_high_set,
        },
};

static const struct user_regset_view user_s390_compat_view = {
        .name = "s390",
        .e_machine = EM_S390,
        .regsets = s390_compat_regsets,
        .n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_31BIT))
                return &user_s390_compat_view;
#endif
        return &user_s390_view;
}
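
/*
 * Illustrative sketch (not part of this file): the regsets above are
 * reachable from user space via PTRACE_GETREGSET/PTRACE_SETREGSET,
 * keyed by ELF note type:
 *
 *	s390_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */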

static const char *gpr_names[NUM_GPRS] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
        if (offset >= NUM_GPRS)
                return 0;
        return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
        unsigned long offset;

        if (!name || *name != 'r')
                return -EINVAL;
        if (kstrtoul(name + 1, 10, &offset))
                return -EINVAL;
        if (offset >= NUM_GPRS)
                return -EINVAL;
        return offset;
}
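
/*
 * Illustrative sketch (not part of this file): these lookups back the
 * kprobes fetch-arg syntax; a probe referring to the syscall number
 * register resolves "r2" through regs_query_register_offset():
 *
 *	int off = regs_query_register_offset("r2");	// -> 2
 *	unsigned long val = regs_get_register(regs, off);
 */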

const char *regs_query_register_name(unsigned int offset)
{
        if (offset >= NUM_GPRS)
                return NULL;
        return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
        unsigned long ksp = kernel_stack_pointer(regs);

        return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
        unsigned long addr;

        addr = kernel_stack_pointer(regs) + n * sizeof(long);
        if (!regs_within_kernel_stack(regs, addr))
                return 0;
        return *(unsigned long *)addr;
}
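
/*
 * Illustrative sketch (not part of this file): a kprobes handler can
 * peek stack-passed values through this helper:
 *
 *	// 0th entry is the word at the current kernel stack pointer
 *	unsigned long slot0 = regs_get_kernel_stack_nth(regs, 0);
 */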