ptrace.c

/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
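
/*
 * Make the debugging state of @task effective in the control registers:
 * the transactional-execution control in CR0, the TX abort dump control
 * and the guarded-storage enable bit in CR2, and the active PER set in
 * CR9-CR11.  The user-specified PER set is merged with the kernel's
 * single/block stepping state before it is loaded.
 */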
void update_cr_regs(struct task_struct *task)
{
        struct pt_regs *regs = task_pt_regs(task);
        struct thread_struct *thread = &task->thread;
        struct per_regs old, new;
        unsigned long cr0_old, cr0_new;
        unsigned long cr2_old, cr2_new;
        int cr0_changed, cr2_changed;

        __ctl_store(cr0_old, 0, 0);
        __ctl_store(cr2_old, 2, 2);
        cr0_new = cr0_old;
        cr2_new = cr2_old;
        /* Take care of the enable/disable of transactional execution. */
        if (MACHINE_HAS_TE) {
                /* Set or clear transaction execution TXC bit 8. */
                cr0_new |= (1UL << 55);
                if (task->thread.per_flags & PER_FLAG_NO_TE)
                        cr0_new &= ~(1UL << 55);
                /* Set or clear transaction execution TDC bits 62 and 63. */
                cr2_new &= ~3UL;
                if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
                        if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
                                cr2_new |= 1UL;
                        else
                                cr2_new |= 2UL;
                }
        }
        /* Take care of enable/disable of guarded storage. */
        if (MACHINE_HAS_GS) {
                cr2_new &= ~(1UL << 4);
                if (task->thread.gs_cb)
                        cr2_new |= (1UL << 4);
        }
        /* Load control register 0/2 iff changed */
        cr0_changed = cr0_new != cr0_old;
        cr2_changed = cr2_new != cr2_old;
        if (cr0_changed)
                __ctl_load(cr0_new, 0, 0);
        if (cr2_changed)
                __ctl_load(cr2_new, 2, 2);
        /* Copy user specified PER registers */
        new.control = thread->per_user.control;
        new.start = thread->per_user.start;
        new.end = thread->per_user.end;
        /* merge TIF_SINGLE_STEP into user specified PER registers. */
        if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
            test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
                if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
                        new.control |= PER_EVENT_BRANCH;
                else
                        new.control |= PER_EVENT_IFETCH;
                new.control |= PER_CONTROL_SUSPENSION;
                new.control |= PER_EVENT_TRANSACTION_END;
                if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
                        new.control |= PER_EVENT_IFETCH;
                new.start = 0;
                new.end = -1UL;
        }
        /* Take care of the PER enablement bit in the PSW. */
        if (!(new.control & PER_EVENT_MASK)) {
                regs->psw.mask &= ~PSW_MASK_PER;
                return;
        }
        regs->psw.mask |= PSW_MASK_PER;
        __ctl_store(old, 9, 11);
        if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
                __ctl_load(new, 9, 11);
}
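
/*
 * Single stepping is realized with PER instruction-fetch events over
 * the complete address space, block stepping with PER successful-branch
 * events.  The TIF_* flags set here are translated into an actual PER
 * configuration by update_cr_regs().
 */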
void user_enable_single_step(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
        set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
        memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
        memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
        clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
        task->thread.per_flags = 0;
}

#define __ADDR_MASK 7
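
/*
 * The __peek/__poke helpers below compare the given offset against the
 * field offsets of a structure through a NULL "dummy" pointer:
 * "(addr_t) &dummy->cr9" is simply offsetof(struct per_struct_kernel, cr9)
 * written in this file's traditional style; the pointer is never
 * dereferenced.
 */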
static inline unsigned long __peek_user_per(struct task_struct *child,
                                            addr_t addr)
{
        struct per_struct_kernel *dummy = NULL;

        if (addr == (addr_t) &dummy->cr9)
                /* Control bits of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        PER_EVENT_IFETCH : child->thread.per_user.control;
        else if (addr == (addr_t) &dummy->cr10)
                /* Start address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        0 : child->thread.per_user.start;
        else if (addr == (addr_t) &dummy->cr11)
                /* End address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        -1UL : child->thread.per_user.end;
        else if (addr == (addr_t) &dummy->bits)
                /* Single-step bit. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        (1UL << (BITS_PER_LONG - 1)) : 0;
        else if (addr == (addr_t) &dummy->starting_addr)
                /* Start address of the user specified per set. */
                return child->thread.per_user.start;
        else if (addr == (addr_t) &dummy->ending_addr)
                /* End address of the user specified per set. */
                return child->thread.per_user.end;
        else if (addr == (addr_t) &dummy->perc_atmid)
                /* PER code, ATMID and AI of the last PER trap */
                return (unsigned long)
                        child->thread.per_event.cause << (BITS_PER_LONG - 16);
        else if (addr == (addr_t) &dummy->address)
                /* Address of the last PER trap */
                return child->thread.per_event.address;
        else if (addr == (addr_t) &dummy->access_id)
                /* Access id of the last PER trap */
                return (unsigned long)
                        child->thread.per_event.paid << (BITS_PER_LONG - 8);
        return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
        struct user *dummy = NULL;
        addr_t offset, tmp;

        if (addr < (addr_t) &dummy->regs.acrs) {
                /*
                 * psw and gprs are stored on the stack
                 */
                tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
                if (addr == (addr_t) &dummy->regs.psw.mask) {
                        /* Return a clean psw mask. */
                        tmp &= PSW_MASK_USER | PSW_MASK_RI;
                        tmp |= PSW_USER_BITS;
                }
        } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.acrs;
                /*
                 * Very special case: old & broken 64 bit gdb reading
                 * from acrs[15]. Result is a 64 bit value. Read the
                 * 32 bit acrs[15] value and shift it by 32. Sick...
                 */
                if (addr == (addr_t) &dummy->regs.acrs[15])
                        tmp = ((unsigned long) child->thread.acrs[15]) << 32;
                else
                        tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
                /*
                 * prevent reads of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                tmp = 0;
        } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
                /*
                 * floating point control reg. is in the thread structure
                 */
                tmp = child->thread.fpu.fpc;
                tmp <<= BITS_PER_LONG - 32;
        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are either in child->thread.fpu
                 * or the child->thread.fpu.vxrs array
                 */
                offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
                if (MACHINE_HAS_VX)
                        tmp = *(addr_t *)
                               ((addr_t) child->thread.fpu.vxrs + 2*offset);
                else
                        tmp = *(addr_t *)
                               ((addr_t) child->thread.fpu.fprs + offset);
        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy->regs.per_info;
                tmp = __peek_user_per(child, addr);
        } else
                tmp = 0;

        return tmp;
}
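
/*
 * Illustrative use by a tracer (not part of this file): with the glibc
 * wrapper, reading one word of the user area, e.g. gpr 2 of a stopped
 * child, boils down to
 *
 *        val = ptrace(PTRACE_PEEKUSER, pid,
 *                     (void *) offsetof(struct user, regs.gprs[2]), NULL);
 *
 * which peek_user() below answers from the saved pt_regs.
 */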
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
        addr_t tmp, mask;

        /*
         * Stupid gdb peeks/pokes the access registers in 64 bit with
         * an alignment of 4. Programmers from hell...
         */
        mask = __ADDR_MASK;
        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
                mask = 3;
        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
                return -EIO;

        tmp = __peek_user(child, addr);
        return put_user(tmp, (addr_t __user *) data);
}

static inline void __poke_user_per(struct task_struct *child,
                                   addr_t addr, addr_t data)
{
        struct per_struct_kernel *dummy = NULL;

        /*
         * There are only three fields in the per_info struct that the
         * debugger user can write to.
         * 1) cr9: the debugger wants to set a new PER event mask
         * 2) starting_addr: the debugger wants to set a new starting
         *    address to use with the PER event mask.
         * 3) ending_addr: the debugger wants to set a new ending
         *    address to use with the PER event mask.
         * The user specified PER event mask and the start and end
         * addresses are used only if single stepping is not in effect.
         * Writes to any other field in per_info are ignored.
         */
        if (addr == (addr_t) &dummy->cr9)
                /* PER event mask of the user specified per set. */
                child->thread.per_user.control =
                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
        else if (addr == (addr_t) &dummy->starting_addr)
                /* Starting address of the user specified per set. */
                child->thread.per_user.start = data;
        else if (addr == (addr_t) &dummy->ending_addr)
                /* Ending address of the user specified per set. */
                child->thread.per_user.end = data;
}

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
        struct user *dummy = NULL;
        addr_t offset;

        if (addr < (addr_t) &dummy->regs.acrs) {
                /*
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy->regs.psw.mask) {
                        unsigned long mask = PSW_MASK_USER;

                        mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
                        if ((data ^ PSW_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
                        if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
                                /* Invalid address-space-control bits */
                                return -EINVAL;
                        if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
                                /* Invalid addressing mode bits */
                                return -EINVAL;
                }
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.acrs;
                /*
                 * Very special case: old & broken 64 bit gdb writing
                 * to acrs[15] with a 64 bit value. Ignore the lower
                 * half of the value and write the upper 32 bit to
                 * acrs[15]. Sick...
                 */
                if (addr == (addr_t) &dummy->regs.acrs[15])
                        child->thread.acrs[15] = (unsigned int) (data >> 32);
                else
                        *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                task_pt_regs(child)->orig_gpr2 = data;
        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
                /*
                 * prevent writes of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                return 0;
        } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
                /*
                 * floating point control reg. is in the thread structure
                 */
                if ((unsigned int) data != 0 ||
                    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
                        return -EINVAL;
                child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are either in child->thread.fpu
                 * or the child->thread.fpu.vxrs array
                 */
                offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
                if (MACHINE_HAS_VX)
                        *(addr_t *)((addr_t)
                                child->thread.fpu.vxrs + 2*offset) = data;
                else
                        *(addr_t *)((addr_t)
                                child->thread.fpu.fprs + offset) = data;
        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy->regs.per_info;
                __poke_user_per(child, addr, data);
        }

        return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
        addr_t mask;

        /*
         * Stupid gdb peeks/pokes the access registers in 64 bit with
         * an alignment of 4. Programmers from hell indeed...
         */
        mask = __ADDR_MASK;
        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
                mask = 3;
        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
                return -EIO;

        return __poke_user(child, addr, data);
}
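
/*
 * Illustrative use of the s390 specific area requests handled below (not
 * part of this file): a tracer can transfer a whole block of the user
 * area with one call.  kernel_addr is the offset within the user area,
 * process_addr the buffer address in the tracer:
 *
 *        ptrace_area parea = {
 *                .len          = sizeof(struct user),
 *                .kernel_addr  = 0,
 *                .process_addr = (addr_t) buf,
 *        };
 *        ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
 *
 * arch_ptrace() below loops over the area one word at a time.
 */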
long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        ptrace_area parea;
        int copied, ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                /* read the word at location addr in the USER area. */
                return peek_user(child, addr, data);
        case PTRACE_POKEUSR:
                /* write the word at location addr in the USER area */
                return poke_user(child, addr, data);
        case PTRACE_PEEKUSR_AREA:
        case PTRACE_POKEUSR_AREA:
                if (copy_from_user(&parea, (void __force __user *) addr,
                                   sizeof(parea)))
                        return -EFAULT;
                addr = parea.kernel_addr;
                data = parea.process_addr;
                copied = 0;
                while (copied < parea.len) {
                        if (request == PTRACE_PEEKUSR_AREA)
                                ret = peek_user(child, addr, data);
                        else {
                                addr_t utmp;

                                if (get_user(utmp,
                                             (addr_t __force __user *) data))
                                        return -EFAULT;
                                ret = poke_user(child, addr, utmp);
                        }
                        if (ret)
                                return ret;
                        addr += sizeof(unsigned long);
                        data += sizeof(unsigned long);
                        copied += sizeof(unsigned long);
                }
                return 0;
        case PTRACE_GET_LAST_BREAK:
                put_user(child->thread.last_break,
                         (unsigned long __user *) data);
                return 0;
        case PTRACE_ENABLE_TE:
                if (!MACHINE_HAS_TE)
                        return -EIO;
                child->thread.per_flags &= ~PER_FLAG_NO_TE;
                return 0;
        case PTRACE_DISABLE_TE:
                if (!MACHINE_HAS_TE)
                        return -EIO;
                child->thread.per_flags |= PER_FLAG_NO_TE;
                child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
                return 0;
        case PTRACE_TE_ABORT_RAND:
                if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
                        return -EIO;
                switch (data) {
                case 0UL:
                        child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
                        break;
                case 1UL:
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
                        break;
                case 2UL:
                        child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
                        child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
                        break;
                default:
                        return -EINVAL;
                }
                return 0;
        default:
                return ptrace_request(child, request, addr, data);
        }
}
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */
/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
                                           addr_t addr)
{
        struct compat_per_struct_kernel *dummy32 = NULL;

        if (addr == (addr_t) &dummy32->cr9)
                /* Control bits of the active per set. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        PER_EVENT_IFETCH : child->thread.per_user.control;
        else if (addr == (addr_t) &dummy32->cr10)
                /* Start address of the active per set. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        0 : child->thread.per_user.start;
        else if (addr == (addr_t) &dummy32->cr11)
                /* End address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
                        PSW32_ADDR_INSN : child->thread.per_user.end;
        else if (addr == (addr_t) &dummy32->bits)
                /* Single-step bit. */
                return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
                        0x80000000 : 0;
        else if (addr == (addr_t) &dummy32->starting_addr)
                /* Start address of the user specified per set. */
                return (__u32) child->thread.per_user.start;
        else if (addr == (addr_t) &dummy32->ending_addr)
                /* End address of the user specified per set. */
                return (__u32) child->thread.per_user.end;
        else if (addr == (addr_t) &dummy32->perc_atmid)
                /* PER code, ATMID and AI of the last PER trap */
                return (__u32) child->thread.per_event.cause << 16;
        else if (addr == (addr_t) &dummy32->address)
                /* Address of the last PER trap */
                return (__u32) child->thread.per_event.address;
        else if (addr == (addr_t) &dummy32->access_id)
                /* Access id of the last PER trap */
                return (__u32) child->thread.per_event.paid << 24;
        return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
        struct compat_user *dummy32 = NULL;
        addr_t offset;
        __u32 tmp;

        if (addr < (addr_t) &dummy32->regs.acrs) {
                struct pt_regs *regs = task_pt_regs(child);
                /*
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Fake a 31 bit psw mask. */
                        tmp = (__u32)(regs->psw.mask >> 32);
                        tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
                        tmp |= PSW32_USER_BITS;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Fake a 31 bit psw address. */
                        tmp = (__u32) regs->psw.addr |
                                (__u32)(regs->psw.mask & PSW_MASK_BA);
                } else {
                        /* gpr 0-15 */
                        tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.acrs;
                tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
                /*
                 * prevent reads of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                tmp = 0;
        } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
                /*
                 * floating point control reg. is in the thread structure
                 */
                tmp = child->thread.fpu.fpc;
        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are either in child->thread.fpu
                 * or the child->thread.fpu.vxrs array
                 */
                offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
                if (MACHINE_HAS_VX)
                        tmp = *(__u32 *)
                               ((addr_t) child->thread.fpu.vxrs + 2*offset);
                else
                        tmp = *(__u32 *)
                               ((addr_t) child->thread.fpu.fprs + offset);
        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy32->regs.per_info;
                tmp = __peek_user_per_compat(child, addr);
        } else
                tmp = 0;

        return tmp;
}
static int peek_user_compat(struct task_struct *child,
                            addr_t addr, addr_t data)
{
        __u32 tmp;

        if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
                return -EIO;

        tmp = __peek_user_compat(child, addr);
        return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
                                          addr_t addr, __u32 data)
{
        struct compat_per_struct_kernel *dummy32 = NULL;

        if (addr == (addr_t) &dummy32->cr9)
                /* PER event mask of the user specified per set. */
                child->thread.per_user.control =
                        data & (PER_EVENT_MASK | PER_CONTROL_MASK);
        else if (addr == (addr_t) &dummy32->starting_addr)
                /* Starting address of the user specified per set. */
                child->thread.per_user.start = data;
        else if (addr == (addr_t) &dummy32->ending_addr)
                /* Ending address of the user specified per set. */
                child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
                              addr_t addr, addr_t data)
{
        struct compat_user *dummy32 = NULL;
        __u32 tmp = (__u32) data;
        addr_t offset;

        if (addr < (addr_t) &dummy32->regs.acrs) {
                struct pt_regs *regs = task_pt_regs(child);
                /*
                 * psw, gprs, acrs and orig_gpr2 are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        __u32 mask = PSW32_MASK_USER;

                        mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
                        if ((tmp ^ PSW32_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
                        if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
                                /* Invalid address-space-control bits */
                                return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
                                (__u64)(tmp & mask) << 32;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Build a 64 bit psw address from 31 bit address. */
                        regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
                        /* Transfer 31 bit amode bit to psw mask. */
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
                                (__u64)(tmp & PSW32_ADDR_AMODE);
                } else {
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.acrs;
                *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
                /*
                 * prevent writes of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                return 0;
        } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
                /*
                 * floating point control reg. is in the thread structure
                 */
                if (test_fp_ctl(tmp))
                        return -EINVAL;
                child->thread.fpu.fpc = data;
        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are either in child->thread.fpu
                 * or the child->thread.fpu.vxrs array
                 */
                offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
                if (MACHINE_HAS_VX)
                        *(__u32 *)((addr_t)
                                child->thread.fpu.vxrs + 2*offset) = tmp;
                else
                        *(__u32 *)((addr_t)
                                child->thread.fpu.fprs + offset) = tmp;
        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                /*
                 * Handle access to the per_info structure.
                 */
                addr -= (addr_t) &dummy32->regs.per_info;
                __poke_user_per_compat(child, addr, data);
        }

        return 0;
}

static int poke_user_compat(struct task_struct *child,
                            addr_t addr, addr_t data)
{
        if (!is_compat_task() || (addr & 3) ||
            addr > sizeof(struct compat_user) - 3)
                return -EIO;

        return __poke_user_compat(child, addr, data);
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        compat_ptrace_area parea;
        int copied, ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                /* read the word at location addr in the USER area. */
                return peek_user_compat(child, addr, data);
        case PTRACE_POKEUSR:
                /* write the word at location addr in the USER area */
                return poke_user_compat(child, addr, data);
        case PTRACE_PEEKUSR_AREA:
        case PTRACE_POKEUSR_AREA:
                if (copy_from_user(&parea, (void __force __user *) addr,
                                   sizeof(parea)))
                        return -EFAULT;
                addr = parea.kernel_addr;
                data = parea.process_addr;
                copied = 0;
                while (copied < parea.len) {
                        if (request == PTRACE_PEEKUSR_AREA)
                                ret = peek_user_compat(child, addr, data);
                        else {
                                __u32 utmp;

                                if (get_user(utmp,
                                             (__u32 __force __user *) data))
                                        return -EFAULT;
                                ret = poke_user_compat(child, addr, utmp);
                        }
                        if (ret)
                                return ret;
                        addr += sizeof(unsigned int);
                        data += sizeof(unsigned int);
                        copied += sizeof(unsigned int);
                }
                return 0;
        case PTRACE_GET_LAST_BREAK:
                put_user(child->thread.last_break,
                         (unsigned int __user *) data);
                return 0;
        }
        return compat_ptrace_request(child, request, addr, data);
}
#endif
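
/*
 * Entry/exit hooks for system call tracing, called from entry.S.
 * do_syscall_trace_enter() returns the system call number to execute,
 * or -1 if the system call is to be skipped (tracer veto, invalid
 * number or seccomp denial).
 */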
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
        unsigned long mask = -1UL;

        /*
         * The sysc_tracesys code in entry.S stored the system
         * call number to gprs[2].
         */
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            (tracehook_report_syscall_entry(regs) ||
             regs->gprs[2] >= NR_syscalls)) {
                /*
                 * Tracing decided this syscall should not happen or the
                 * debugger stored an invalid system call number. Skip
                 * the system call and the system call restart handling.
                 */
                clear_pt_regs_flag(regs, PIF_SYSCALL);
                return -1;
        }

        /* Do the secure computing check after ptrace. */
        if (secure_computing(NULL)) {
                /* seccomp failures shouldn't expose any additional code. */
                return -1;
        }

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->gprs[2]);

        if (is_compat_task())
                mask = 0xffffffff;

        audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
                            regs->gprs[3] & mask, regs->gprs[4] & mask,
                            regs->gprs[5] & mask);

        return regs->gprs[2];
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
        audit_syscall_exit(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->gprs[2]);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);
}
/*
 * user_regset definitions.
 */
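
/*
 * Each ->get copies registers out to a buffer, each ->set copies them
 * back in; pos and count are byte positions within the regset.  The
 * regset core passes either a kernel buffer (kbuf) or a user buffer
 * (ubuf), which is why the handlers below have two symmetric loops or
 * simply defer to user_regset_copyout()/user_regset_copyin().
 */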
static int s390_regs_get(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         void *kbuf, void __user *ubuf)
{
        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                unsigned long *k = kbuf;
                while (count > 0) {
                        *k++ = __peek_user(target, pos);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                unsigned long __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(__peek_user(target, pos), u++))
                                return -EFAULT;
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }
        return 0;
}

static int s390_regs_set(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         const void *kbuf, const void __user *ubuf)
{
        int rc = 0;

        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                const unsigned long *k = kbuf;
                while (count > 0 && !rc) {
                        rc = __poke_user(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const unsigned long __user *u = ubuf;
                while (count > 0 && !rc) {
                        unsigned long word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        rc = __poke_user(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        if (rc == 0 && target == current)
                restore_access_regs(target->thread.acrs);

        return rc;
}

static int s390_fpregs_get(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, void *kbuf, void __user *ubuf)
{
        _s390_fp_regs fp_regs;

        if (target == current)
                save_fpu_regs();

        fp_regs.fpc = target->thread.fpu.fpc;
        fpregs_store(&fp_regs, &target->thread.fpu);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, const void *kbuf,
                           const void __user *ubuf)
{
        int rc = 0;
        freg_t fprs[__NUM_FPRS];

        if (target == current)
                save_fpu_regs();

        if (MACHINE_HAS_VX)
                convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
        else
                memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
                                        0, offsetof(s390_fp_regs, fprs));
                if (rc)
                        return rc;
                if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
                        return -EINVAL;
                target->thread.fpu.fpc = ufpc[0];
        }

        if (rc == 0 && count > 0)
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                        fprs, offsetof(s390_fp_regs, fprs), -1);
        if (rc)
                return rc;

        if (MACHINE_HAS_VX)
                convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
        else
                memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

        return rc;
}

static int s390_last_break_get(struct task_struct *target,
                               const struct user_regset *regset,
                               unsigned int pos, unsigned int count,
                               void *kbuf, void __user *ubuf)
{
        if (count > 0) {
                if (kbuf) {
                        unsigned long *k = kbuf;
                        *k = target->thread.last_break;
                } else {
                        unsigned long __user *u = ubuf;
                        if (__put_user(target->thread.last_break, u))
                                return -EFAULT;
                }
        }
        return 0;
}

static int s390_last_break_set(struct task_struct *target,
                               const struct user_regset *regset,
                               unsigned int pos, unsigned int count,
                               const void *kbuf, const void __user *ubuf)
{
        return 0;
}

static int s390_tdb_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        unsigned char *data;

        if (!(regs->int_code & 0x200))
                return -ENODATA;
        data = target->thread.trap_tdb;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}

static int s390_tdb_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        return 0;
}
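
/*
 * With the vector facility the 16 floating point registers overlay the
 * leftmost halves of vector registers V0-V15.  The VXRS_LOW regset
 * therefore transfers only the rightmost 64 bits of V0-V15 (the FPR
 * view covers the other half), while VXRS_HIGH transfers V16-V31 in
 * full.
 */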
static int s390_vxrs_low_get(struct task_struct *target,
                             const struct user_regset *regset,
                             unsigned int pos, unsigned int count,
                             void *kbuf, void __user *ubuf)
{
        __u64 vxrs[__NUM_VXRS_LOW];
        int i;

        if (!MACHINE_HAS_VX)
                return -ENODEV;
        if (target == current)
                save_fpu_regs();
        for (i = 0; i < __NUM_VXRS_LOW; i++)
                vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_low_set(struct task_struct *target,
                             const struct user_regset *regset,
                             unsigned int pos, unsigned int count,
                             const void *kbuf, const void __user *ubuf)
{
        __u64 vxrs[__NUM_VXRS_LOW];
        int i, rc;

        if (!MACHINE_HAS_VX)
                return -ENODEV;
        if (target == current)
                save_fpu_regs();

        for (i = 0; i < __NUM_VXRS_LOW; i++)
                vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
        if (rc == 0)
                for (i = 0; i < __NUM_VXRS_LOW; i++)
                        *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

        return rc;
}

static int s390_vxrs_high_get(struct task_struct *target,
                              const struct user_regset *regset,
                              unsigned int pos, unsigned int count,
                              void *kbuf, void __user *ubuf)
{
        __vector128 vxrs[__NUM_VXRS_HIGH];

        if (!MACHINE_HAS_VX)
                return -ENODEV;
        if (target == current)
                save_fpu_regs();
        memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_high_set(struct task_struct *target,
                              const struct user_regset *regset,
                              unsigned int pos, unsigned int count,
                              const void *kbuf, const void __user *ubuf)
{
        int rc;

        if (!MACHINE_HAS_VX)
                return -ENODEV;
        if (target == current)
                save_fpu_regs();

        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
        return rc;
}

static int s390_system_call_get(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                void *kbuf, void __user *ubuf)
{
        unsigned int *data = &target->thread.system_call;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   data, 0, sizeof(unsigned int));
}

static int s390_system_call_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        unsigned int *data = &target->thread.system_call;
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  data, 0, sizeof(unsigned int));
}

static int s390_gs_cb_get(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
{
        struct gs_cb *data = target->thread.gs_cb;

        if (!MACHINE_HAS_GS)
                return -ENODEV;
        if (!data)
                return -ENODATA;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   data, 0, sizeof(struct gs_cb));
}

static int s390_gs_cb_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct gs_cb *data = target->thread.gs_cb;

        if (!MACHINE_HAS_GS)
                return -ENODEV;
        if (!data) {
                data = kzalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;
                target->thread.gs_cb = data;
        }
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  data, 0, sizeof(struct gs_cb));
}

static int s390_gs_bc_get(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
{
        struct gs_cb *data = target->thread.gs_bc_cb;

        if (!MACHINE_HAS_GS)
                return -ENODEV;
        if (!data)
                return -ENODATA;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   data, 0, sizeof(struct gs_cb));
}

static int s390_gs_bc_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct gs_cb *data = target->thread.gs_bc_cb;

        if (!MACHINE_HAS_GS)
                return -ENODEV;
        if (!data) {
                data = kzalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;
                target->thread.gs_bc_cb = data;
        }
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  data, 0, sizeof(struct gs_cb));
}
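
/*
 * The regset tables below back PTRACE_GETREGSET/PTRACE_SETREGSET and
 * determine which NT_* notes appear in ELF core dumps; the appropriate
 * view is selected per task by task_user_regset_view() at the end of
 * this file.
 */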
static const struct user_regset s390_regsets[] = {
        {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(s390_regs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_regs_get,
                .set = s390_regs_set,
        },
        {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(s390_fp_regs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_fpregs_get,
                .set = s390_fpregs_set,
        },
        {
                .core_note_type = NT_S390_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(unsigned int),
                .align = sizeof(unsigned int),
                .get = s390_system_call_get,
                .set = s390_system_call_set,
        },
        {
                .core_note_type = NT_S390_LAST_BREAK,
                .n = 1,
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_last_break_get,
                .set = s390_last_break_set,
        },
        {
                .core_note_type = NT_S390_TDB,
                .n = 1,
                .size = 256,
                .align = 1,
                .get = s390_tdb_get,
                .set = s390_tdb_set,
        },
        {
                .core_note_type = NT_S390_VXRS_LOW,
                .n = __NUM_VXRS_LOW,
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .get = s390_vxrs_low_get,
                .set = s390_vxrs_low_set,
        },
        {
                .core_note_type = NT_S390_VXRS_HIGH,
                .n = __NUM_VXRS_HIGH,
                .size = sizeof(__vector128),
                .align = sizeof(__vector128),
                .get = s390_vxrs_high_get,
                .set = s390_vxrs_high_set,
        },
        {
                .core_note_type = NT_S390_GS_CB,
                .n = sizeof(struct gs_cb) / sizeof(__u64),
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .get = s390_gs_cb_get,
                .set = s390_gs_cb_set,
        },
        {
                .core_note_type = NT_S390_GS_BC,
                .n = sizeof(struct gs_cb) / sizeof(__u64),
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .get = s390_gs_bc_get,
                .set = s390_gs_bc_set,
        },
};
static const struct user_regset_view user_s390_view = {
        .name = UTS_MACHINE,
        .e_machine = EM_S390,
        .regsets = s390_regsets,
        .n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                void *kbuf, void __user *ubuf)
{
        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *k++ = __peek_user_compat(target, pos);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                compat_ulong_t __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(__peek_user_compat(target, pos), u++))
                                return -EFAULT;
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }
        return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        int rc = 0;

        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                const compat_ulong_t *k = kbuf;
                while (count > 0 && !rc) {
                        rc = __poke_user_compat(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const compat_ulong_t __user *u = ubuf;
                while (count > 0 && !rc) {
                        compat_ulong_t word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        rc = __poke_user_compat(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        if (rc == 0 && target == current)
                restore_access_regs(target->thread.acrs);

        return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
                                     const struct user_regset *regset,
                                     unsigned int pos, unsigned int count,
                                     void *kbuf, void __user *ubuf)
{
        compat_ulong_t *gprs_high;

        gprs_high = (compat_ulong_t *)
                &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
        if (kbuf) {
                compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *k++ = *gprs_high;
                        gprs_high += 2;
                        count -= sizeof(*k);
                }
        } else {
                compat_ulong_t __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(*gprs_high, u++))
                                return -EFAULT;
                        gprs_high += 2;
                        count -= sizeof(*u);
                }
        }
        return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
                                     const struct user_regset *regset,
                                     unsigned int pos, unsigned int count,
                                     const void *kbuf, const void __user *ubuf)
{
        compat_ulong_t *gprs_high;
        int rc = 0;

        gprs_high = (compat_ulong_t *)
                &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
        if (kbuf) {
                const compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *gprs_high = *k++;
                        /* advance to the high half of the next 64 bit gpr */
                        gprs_high += 2;
                        count -= sizeof(*k);
                }
        } else {
                const compat_ulong_t __user *u = ubuf;
                while (count > 0 && !rc) {
                        unsigned long word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        *gprs_high = word;
                        gprs_high += 2;
                        count -= sizeof(*u);
                }
        }

        return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
                                      const struct user_regset *regset,
                                      unsigned int pos, unsigned int count,
                                      void *kbuf, void __user *ubuf)
{
        compat_ulong_t last_break;

        if (count > 0) {
                last_break = target->thread.last_break;
                if (kbuf) {
                        unsigned long *k = kbuf;
                        *k = last_break;
                } else {
                        unsigned long __user *u = ubuf;
                        if (__put_user(last_break, u))
                                return -EFAULT;
                }
        }
        return 0;
}

static int s390_compat_last_break_set(struct task_struct *target,
                                      const struct user_regset *regset,
                                      unsigned int pos, unsigned int count,
                                      const void *kbuf, const void __user *ubuf)
{
        return 0;
}
static const struct user_regset s390_compat_regsets[] = {
        {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_compat_regs_get,
                .set = s390_compat_regs_set,
        },
        {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_fpregs_get,
                .set = s390_fpregs_set,
        },
        {
                .core_note_type = NT_S390_SYSTEM_CALL,
                .n = 1,
                .size = sizeof(compat_uint_t),
                .align = sizeof(compat_uint_t),
                .get = s390_system_call_get,
                .set = s390_system_call_set,
        },
        {
                .core_note_type = NT_S390_LAST_BREAK,
                .n = 1,
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_compat_last_break_get,
                .set = s390_compat_last_break_set,
        },
        {
                .core_note_type = NT_S390_TDB,
                .n = 1,
                .size = 256,
                .align = 1,
                .get = s390_tdb_get,
                .set = s390_tdb_set,
        },
        {
                .core_note_type = NT_S390_VXRS_LOW,
                .n = __NUM_VXRS_LOW,
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .get = s390_vxrs_low_get,
                .set = s390_vxrs_low_set,
        },
        {
                .core_note_type = NT_S390_VXRS_HIGH,
                .n = __NUM_VXRS_HIGH,
                .size = sizeof(__vector128),
                .align = sizeof(__vector128),
                .get = s390_vxrs_high_get,
                .set = s390_vxrs_high_set,
        },
        {
                .core_note_type = NT_S390_HIGH_GPRS,
                .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_compat_regs_high_get,
                .set = s390_compat_regs_high_set,
        },
        {
                .core_note_type = NT_S390_GS_CB,
                .n = sizeof(struct gs_cb) / sizeof(__u64),
                .size = sizeof(__u64),
                .align = sizeof(__u64),
                .get = s390_gs_cb_get,
                .set = s390_gs_cb_set,
        },
};

static const struct user_regset_view user_s390_compat_view = {
        .name = "s390",
        .e_machine = EM_S390,
        .regsets = s390_compat_regsets,
        .n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_31BIT))
                return &user_s390_compat_view;
#endif
        return &user_s390_view;
}
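
/*
 * The helpers below resolve register names and stack slots for the
 * generic kernel tracing code, e.g. kprobe event arguments such as
 * "%r2" or "$stack3".
 */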
static const char *gpr_names[NUM_GPRS] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
        if (offset >= NUM_GPRS)
                return 0;
        return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
        unsigned long offset;

        if (!name || *name != 'r')
                return -EINVAL;
        if (kstrtoul(name + 1, 10, &offset))
                return -EINVAL;
        if (offset >= NUM_GPRS)
                return -EINVAL;
        return offset;
}

const char *regs_query_register_name(unsigned int offset)
{
        if (offset >= NUM_GPRS)
                return NULL;
        return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
        unsigned long ksp = kernel_stack_pointer(regs);

        return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
        unsigned long addr;

        addr = kernel_stack_pointer(regs) + n * sizeof(long);
        if (!regs_within_kernel_stack(regs, addr))
                return 0;
        return *(unsigned long *)addr;
}