/*
 * linux/arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>

#include <asm/pgtable.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#define REG_PC 15
#define REG_PSR 16

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM 0xef9f0001
#define BREAKINST_THUMB 0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction. The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM 0xe7f001f0
#define BREAKINST_THUMB 0xde01
#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset: the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @addr: address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * this routine will get a word off of the process's privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * this routine will put a word on the process's privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_BRKPT,
			(void __user *)instruction_pointer(regs), tsk);
}

static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}
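
/*
 * Undefined-instruction hooks for the ARM, Thumb and Thumb-2 breakpoint
 * encodings. All three funnel into break_trap(), which raises SIGTRAP
 * against the current task via ptrace_break().
 */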
static struct undef_hook arm_break_hook = {
	.instr_mask = 0x0fffffff,
	.instr_val = 0x07f001f0,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = 0,
	.fn = break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask = 0xffff,
	.instr_val = 0xde01,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = PSR_T_BIT,
	.fn = break_trap,
};

static struct undef_hook thumb2_break_hook = {
	.instr_mask = 0xffffffff,
	.instr_val = 0xf7f0a000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = PSR_T_BIT,
	.fn = break_trap,
};

static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);

/*
 * Read the word at offset "off" from the "struct user". We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3)
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);
	else if (off >= sizeof(struct user))
		return -EIO;

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user". We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}

#ifdef CONFIG_IWMMXT
/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
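/*
 * For example, registers 1 and 2 hold the address and control word of
 * breakpoint slot 0, while registers -1 and -2 hold the address and
 * control word of the first watchpoint slot.
 */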
static int ptrace_hbp_num_to_idx(long num)
{
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	return (num - 1) >> 1;
}

/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;
	return num > mid ? mid - num : num;
}

/*
 * Handle hitting a HW-breakpoint.
 */
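/*
 * The virtual register number of the breakpoint that fired (or 0 if it
 * could not be found) is reported to the tracer in si_errno of the
 * resulting SIGTRAP.
 */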
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}
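
/*
 * Pack the hardware breakpoint resource information that is reported as
 * virtual register 0: byte 0 holds the number of breakpoint slots, byte 1
 * the number of watchpoint slots, byte 2 the maximum watchpoint length and
 * byte 3 the debug architecture version.
 */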
static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);
	debug_arch = arch_get_debug_arch();
	wp_len = arch_get_max_wp_len();

	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	return reg;
}
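
/*
 * Allocate a new, initially disabled hw_breakpoint perf event of the given
 * type for @tsk. The address and length are placeholders until the tracer
 * writes the corresponding virtual address/control registers.
 */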
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}
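
/*
 * PTRACE_GETHBPREGS: read one virtual debug register. Register 0 returns
 * the resource information word; for the other registers, odd numbers
 * return the address and even numbers the encoded control word of the
 * corresponding slot.
 */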
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}
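
/*
 * PTRACE_SETHBPREGS: write one virtual debug register, creating the
 * underlying perf event on first use. Register 0 (the resource information
 * word) is read-only, so writes to it are silently ignored.
 */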
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr = user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len = gen_len;
		attr.bp_type = gen_type;
		attr.disabled = !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);

out:
	return ret;
}
#endif

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs = *task_pt_regs(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}
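
/*
 * The legacy FPA floating point state lives in thread_info->fpstate and is
 * exported to userspace as a struct user_fp. Setting it also marks
 * coprocessors 1 and 2 as used by this task.
 */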
static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &thread->fpstate,
				  0, sizeof(struct user_fp));
}

#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 *
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct. The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 *
 * The bounds-checking logic built into user_regset_copyout and friends
 * means that we can make a simple sequence of calls to map the relevant data
 * to/from the specified slice of the user regset structure.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);
	new_vfp = thread->vfpstate.hard;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					user_fpregs_offset + sizeof(new_vfp.fpregs),
					user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */

enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}
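
/*
 * arch_ptrace() handles the ARM-specific ptrace requests; anything it does
 * not recognise is passed on to the generic ptrace_request().
 */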
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_GPR,
					  0, sizeof(struct pt_regs),
					  datap);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_GPR,
					    0, sizeof(struct pt_regs),
					    datap);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_FPR,
					  0, sizeof(union fp_state),
					  datap);
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_FPR,
					    0, sizeof(union fp_state),
					    datap);
		break;

#ifdef CONFIG_IWMMXT
	case PTRACE_GETWMMXREGS:
		ret = ptrace_getwmmxregs(child, datap);
		break;

	case PTRACE_SETWMMXREGS:
		ret = ptrace_setwmmxregs(child, datap);
		break;
#endif

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value[0],
			       datap);
		break;

	case PTRACE_SET_SYSCALL:
		task_thread_info(child)->syscall = data;
		ret = 0;
		break;

#ifdef CONFIG_CRUNCH
	case PTRACE_GETCRUNCHREGS:
		ret = ptrace_getcrunchregs(child, datap);
		break;

	case PTRACE_SETCRUNCHREGS:
		ret = ptrace_setcrunchregs(child, datap);
		break;
#endif

#ifdef CONFIG_VFP
	case PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_VFP,
					  0, ARM_VFPREGS_SIZE,
					  datap);
		break;

	case PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_VFP,
					    0, ARM_VFPREGS_SIZE,
					    datap);
		break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr,
					(unsigned long __user *)data);
		break;

	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr,
					(unsigned long __user *)data);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	unsigned long ip;

	/*
	 * IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, =1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		current_thread_info()->syscall = -1;

	regs->ARM_ip = ip;
}
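
/*
 * Called on syscall entry from the assembly glue. Reports the syscall to
 * the tracer and to seccomp, and returns the syscall number to invoke
 * (possibly rewritten by the tracer or by seccomp), or -1 if the syscall
 * has been denied.
 */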
asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
	current_thread_info()->syscall = scno;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing(NULL) == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(current_thread_info()->syscall);
#endif

	/* Tracer or seccomp may have changed syscall. */
	scno = current_thread_info()->syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, scno);

	audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
			    regs->ARM_r3);

	return scno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread. This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}