/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
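
/*
 * Illustrative sketch only (not part of the original file): callers such as
 * the generic register/stack access API resolve a register by name once,
 * then read it via the returned byte offset, roughly:
 *
 *	int off = regs_query_register_offset("x0");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 *
 * regs_get_register() here is the helper from asm/ptrace.h; any other names
 * would be assumptions.
 */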
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). If @addr is within the kernel stack, it returns true. If not,
 * returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
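
/*
 * Note: the helper above is what backs, for example, the $stackN fetch
 * arguments of kprobe-based trace events in the generic tracing code; the
 * bounds check means an out-of-range N silently yields 0 rather than a
 * stray kernel read.
 */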
/*
 * TODO: does not yet catch signals sent when the child dies;
 * that needs to be done in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno,
						  (void __user *)bkpt->trigger,
						  desc);
		/*
		 * Return here so a compat task gets exactly one SIGTRAP,
		 * rather than falling through to the native path below
		 * and being signalled twice.
		 */
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
			      (void __user *)(bkpt->trigger),
			      desc);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}
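
/*
 * The two helpers below index a per-thread breakpoint array with a value
 * that ultimately comes from a ptrace caller. The bounds check followed by
 * array_index_nospec() clamps the index under speculative execution as
 * well, which is the usual Spectre-v1 hardening pattern for user-supplied
 * array indices.
 */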
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
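
/*
 * Layout of the resource info word built above, as seen by debuggers in
 * the first u32 of the NT_ARM_HW_BREAK/NT_ARM_HW_WATCH regsets:
 *
 *	bits [15:8]	debug architecture version (debug_monitors_arch())
 *	bits [7:0]	number of breakpoint/watchpoint slots
 *
 * All higher bits read as zero.
 */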
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
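
/*
 * hw_break_get()/hw_break_set() below marshal struct user_hwdebug_state
 * (uapi asm/ptrace.h), which is laid out as a u32 resource info word, a
 * u32 pad, then one { u64 addr; u32 ctrl; u32 pad; } entry per slot in
 * dbg_regs[]; hence the ADDR/CTRL/PAD size macros above.
 */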
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.uw.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
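
/*
 * Tracer-side sketch (illustrative only, not part of this file): the
 * NT_ARM_SYSTEM_CALL regset above lets a debugger rewrite the syscall
 * number while the tracee is stopped at a syscall entry, e.g.:
 *
 *	int nr = -1;	(-1, i.e. NO_SYSCALL, skips the syscall)
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */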
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}
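
/*
 * Wire format of the NT_ARM_SVE regset handled below: a user_sve_header,
 * followed either by a user_fpsimd_state payload (SVE_PT_REGS_FPSIMD) or
 * by the full Z/P/FFR register data plus fpsr/fpcr (SVE_PT_REGS_SVE),
 * with everything padded out to multiples of SVE_VQ_BYTES.
 */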
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */
	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */
	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
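
/*
 * Tracer-side sketch (illustrative only): regsets in the view above are
 * addressed by their core note type, so reading a tracee's GPRs from
 * userspace looks roughly like:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */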
#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			reg = pstate_to_compat_psr(reg);
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
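
/*
 * For example: register 1 is breakpoint 0's address and register 2 its
 * control word, while register -1 is watchpoint 0's address and -2 its
 * control word; the sign selects the array and bit 0 of the register
 * number selects address vs. control (see compat_ptrace_hbp_get/set).
 */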
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}
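
/*
 * The word built above packs, from most to least significant byte: debug
 * architecture version, watchpoint length (fixed at 8 bytes here), number
 * of watchpoint slots, number of breakpoint slots; this matches the layout
 * AArch32 PTRACE_GETHBPREGS users expect from arch/arm.
 */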
static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}
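
/*
 * Note that a tracer can observe this convention: while the task is
 * stopped inside tracehook_report_syscall(), x7 (r12 for compat tasks)
 * reads as 0 at syscall entry and 1 at syscall exit, and any value the
 * tracer writes to it is discarded when the saved value is restored
 * above.
 */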
int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a.
 * We also take into account DIT (bit 24), which is not yet documented, and
 * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
 * allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}