ptrace.c

/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

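/*
 * update_cr_regs - propagate a task's debugging state to the control
 * registers. On 64 bit this covers the transactional-execution TXC
 * bit in CR0 and the TDC bits in CR2; the PER control, start and end
 * values always go to CR9-11. The PER bit in the PSW mask is set only
 * if at least one PER event is enabled.
 */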
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

#ifdef CONFIG_64BIT
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
		unsigned long cr, cr_new;

		__ctl_store(cr, 0, 0);
		cr_new = cr;
		if (MACHINE_HAS_TE) {
			/* Set or clear transaction execution TXC bit 8. */
			cr_new |= (1UL << 55);
			if (task->thread.per_flags & PER_FLAG_NO_TE)
				cr_new &= ~(1UL << 55);
		}
		if (MACHINE_HAS_VX) {
			/* Enable/disable of vector extension */
			cr_new &= ~(1UL << 17);
			if (task->thread.vxrs)
				cr_new |= (1UL << 17);
		}
		if (cr_new != cr)
			__ctl_load(cr_new, 0, 0);
		if (MACHINE_HAS_TE) {
			/* Set/clear transaction execution TDC bits 62/63. */
			__ctl_store(cr, 2, 2);
			cr_new = cr & ~3UL;
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
				if (task->thread.per_flags &
				    PER_FLAG_TE_ABORT_RAND_TEND)
					cr_new |= 1UL;
				else
					cr_new |= 2UL;
			}
			if (cr_new != cr)
				__ctl_load(cr_new, 2, 2);
		}
	}
#endif
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
#endif
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}

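/*
 * The stepping helpers below only flip TIF_* flags; the flags are
 * translated into PER instruction-fetch or successful-branch events
 * by update_cr_regs() when the task's control registers are next
 * loaded.
 */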
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}

#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif

static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}
	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp <<= BITS_PER_LONG - 32;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);
	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			if ((unsigned int) data != 0 ||
			    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
				return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);
	}

	return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);
	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);
	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		/* Removing high order bit from addr (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		return ptrace_request(child, request, addr, data);
	}
}

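/*
 * Illustrative sketch of the PTRACE_PEEKUSR_AREA request from the
 * tracer's side (not part of this file; the variable names and error
 * handling are assumptions):
 *
 *	ptrace_area parea;
 *	unsigned long buf[4];
 *
 *	parea.len = sizeof(buf);
 *	parea.kernel_addr = 0;			 // offset into struct user
 *	parea.process_addr = (unsigned long) buf; // tracer's buffer
 *	if (ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL) == -1)
 *		perror("PTRACE_PEEKUSR_AREA");
 *
 * The loop above then copies parea.len bytes of the user area, one
 * word at a time, from offset parea.kernel_addr into the buffer at
 * parea.process_addr.
 */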
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the only difference to the 64 bit versions of the
 * requests is that the access is done in multiples of 4 bytes
 * instead of 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */
/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);
	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    test_fp_ctl(tmp))
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);
	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);
	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

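/*
 * Syscall tracing and auditing hooks, called from the system call
 * path in entry.S. do_syscall_trace_enter() returns the system call
 * number to execute, or -1 if the system call is to be skipped.
 */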
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing()) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
	}

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_pt_regs_flag(regs, PIF_SYSCALL);
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	audit_syscall_entry(is_compat_task() ?
				AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
			    regs->gprs[2], regs->orig_gpr2,
			    regs->gprs[3], regs->gprs[4],
			    regs->gprs[5]);
out:
	return ret ?: regs->gprs[2];
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */
static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current) {
		save_fp_ctl(&target->thread.fp_regs.fpc);
		save_fp_regs(target->thread.fp_regs.fprs);
	}
#ifdef CONFIG_64BIT
	else if (target->thread.vxrs) {
		int i;

		for (i = 0; i < __NUM_VXRS_LOW; i++)
			target->thread.fp_regs.fprs[i] =
				*(freg_t *)(target->thread.vxrs + i);
	}
#endif
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current) {
		save_fp_ctl(&target->thread.fp_regs.fpc);
		save_fp_regs(target->thread.fp_regs.fprs);
	}

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fp_regs.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	if (rc == 0) {
		if (target == current) {
			restore_fp_ctl(&target->thread.fp_regs.fpc);
			restore_fp_regs(target->thread.fp_regs.fprs);
		}
#ifdef CONFIG_64BIT
		else if (target->thread.vxrs) {
			int i;

			for (i = 0; i < __NUM_VXRS_LOW; i++)
				*(freg_t *)(target->thread.vxrs + i) =
					target->thread.fp_regs.fprs[i];
		}
#endif
	}

	return rc;
}

#ifdef CONFIG_64BIT

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = task_thread_info(target)->last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(task_thread_info(target)->last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_vxrs_active(struct task_struct *target,
			    const struct user_regset *regset)
{
	return !!target->thread.vxrs;
}

static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     void *kbuf, void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (target->thread.vxrs) {
		if (target == current)
			save_vx_regs(target->thread.vxrs);
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
	} else
		memset(vxrs, 0, sizeof(vxrs));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!target->thread.vxrs) {
		rc = alloc_vector_registers(target);
		if (rc)
			return rc;
	} else if (target == current)
		save_vx_regs(target->thread.vxrs);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0) {
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
		if (target == current)
			restore_vx_regs(target->thread.vxrs);
	}

	return rc;
}

static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      void *kbuf, void __user *ubuf)
{
	__vector128 vxrs[__NUM_VXRS_HIGH];

	if (target->thread.vxrs) {
		if (target == current)
			save_vx_regs(target->thread.vxrs);
		memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
		       sizeof(vxrs));
	} else
		memset(vxrs, 0, sizeof(vxrs));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	int rc;

	if (!target->thread.vxrs) {
		rc = alloc_vector_registers(target);
		if (rc)
			return rc;
	} else if (target == current)
		save_vx_regs(target->thread.vxrs);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
	if (rc == 0 && target == current)
		restore_vx_regs(target->thread.vxrs);

	return rc;
}

#endif

static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	unsigned int *data = &task_thread_info(target)->system_call;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(unsigned int));
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &task_thread_info(target)->system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}

static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
#ifdef CONFIG_64BIT
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.active = s390_vxrs_active,
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.active = s390_vxrs_active,
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
#endif
};

static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			/* advance to the high half of the next 64 bit gpr */
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = task_thread_info(target)->last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.active = s390_vxrs_active,
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.active = s390_vxrs_active,
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}

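/*
 * A debugger reaches the regsets defined above through the generic
 * PTRACE_GETREGSET/PTRACE_SETREGSET requests, selecting a regset by
 * its ELF note type. A minimal sketch from the tracer's side (the
 * buffer name and type are assumptions):
 *
 *	s390_regs regs_buf;
 *	struct iovec iov = {
 *		.iov_base = &regs_buf,
 *		.iov_len = sizeof(regs_buf),
 *	};
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */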
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (kstrtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}

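/*
 * Example: regs_query_register_offset("r5") returns 5, which can be
 * passed to regs_get_register() to read gpr 5 from a pt_regs. Both
 * helpers treat out-of-range values defensively (0 resp. -EINVAL).
 */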
const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel
 * stack, this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}