ptrace.c

/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo = SIGTRAP,
		.si_errno = 0,
		.si_code  = TRAP_HWBKPT,
		.si_addr  = (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}
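
/*
 * The per-thread breakpoint and watchpoint slots are looked up by the
 * regset note type (NT_ARM_HW_BREAK or NT_ARM_HW_WATCH) plus a slot
 * index; out-of-range indices yield -EINVAL.
 */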

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
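
/*
 * Translate an architectural control value into generic perf_event
 * attributes (length and type), rejecting types that do not match the
 * requested note: execute-only for breakpoints, read/write for
 * watchpoints.
 */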

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;

	return 0;
}
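
/*
 * Resource info word as exposed to userspace: debug architecture
 * version in bits [15:8], number of available slots in bits [7:0].
 */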

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? bp->attr.bp_addr : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}
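
/*
 * The NT_ARM_HW_BREAK/NT_ARM_HW_WATCH regsets follow the layout of
 * struct user_hwdebug_state: a 32-bit resource info word, 32 bits of
 * padding, then an (address, ctrl, pad) triple per slot.
 */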

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
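
/*
 * For an AArch32 tracee, regset indices 0-14 are read from and written
 * to pt_regs->regs[] directly, while indices 15, 16 and 17 map to the
 * pc, pstate and orig_x0 fields respectively.
 */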

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		ret = copy_to_user(ubuf, &reg, sizeof(reg));
		if (ret)
			break;

		ubuf += sizeof(reg);
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		ret = copy_from_user(&reg, ubuf, sizeof(reg));
		if (ret)
			return ret;

		ubuf += sizeof(reg);

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t *)ubuf);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		ret = get_user(fpscr, (compat_ulong_t *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
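
/*
 * Compat PTRACE_PEEKUSR/POKEUSR: offsets below the AArch32 gregset size
 * are serviced through the REGSET_COMPAT_GPR regset, while the magic
 * COMPAT_PT_TEXT_ADDR, COMPAT_PT_DATA_ADDR and COMPAT_PT_TEXT_END_ADDR
 * offsets report the text/data boundaries of the tracee's mm.
 */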

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
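
/*
 * Resource info word in the AArch32 layout: debug architecture,
 * watchpoint length, number of watchpoints and number of breakpoints,
 * packed one byte each from bits [31:24] down to [7:0].
 */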

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
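
/*
 * syscall_trace() is invoked from the syscall entry/exit path; dir == 0
 * denotes entry and dir == 1 exit. The direction is exposed to the
 * tracer through a scratch register (ip/r12 for AArch32 tasks, x7
 * otherwise) and restored before returning.
 */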

asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
{
	unsigned long saved_reg;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return regs->syscallno;

	if (is_compat_task()) {
		/* AArch32 uses ip (r12) for scratch */
		saved_reg = regs->regs[12];
		regs->regs[12] = dir;
	} else {
		/*
		 * Save X7. X7 is used to denote syscall entry/exit:
		 * X7 = 0 -> entry, = 1 -> exit
		 */
		saved_reg = regs->regs[7];
		regs->regs[7] = dir;
	}

	if (dir)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	if (is_compat_task())
		regs->regs[12] = saved_reg;
	else
		regs->regs[7] = saved_reg;

	return regs->syscallno;
}