/* arch/arm64/kernel/signal.c */
  1. /*
  2. * Based on arch/arm/kernel/signal.c
  3. *
  4. * Copyright (C) 1995-2009 Russell King
  5. * Copyright (C) 2012 ARM Ltd.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <linux/cache.h>
  20. #include <linux/compat.h>
  21. #include <linux/errno.h>
  22. #include <linux/kernel.h>
  23. #include <linux/signal.h>
  24. #include <linux/personality.h>
  25. #include <linux/freezer.h>
  26. #include <linux/stddef.h>
  27. #include <linux/uaccess.h>
  28. #include <linux/sizes.h>
  29. #include <linux/string.h>
  30. #include <linux/tracehook.h>
  31. #include <linux/ratelimit.h>
  32. #include <linux/syscalls.h>
  33. #include <asm/daifflags.h>
  34. #include <asm/debug-monitors.h>
  35. #include <asm/elf.h>
  36. #include <asm/cacheflush.h>
  37. #include <asm/ucontext.h>
  38. #include <asm/unistd.h>
  39. #include <asm/fpsimd.h>
  40. #include <asm/ptrace.h>
  41. #include <asm/signal32.h>
  42. #include <asm/traps.h>
  43. #include <asm/vdso.h>
  44. /*
  45. * Do a signal return; undo the signal stack. These are aligned to 128-bit.
  46. */
/*
 * User-visible signal frame: siginfo followed by ucontext, pushed onto
 * the user stack on signal delivery and consumed by sys_rt_sigreturn().
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

/* AAPCS64-style frame record (fp, lr pair) so unwinders can walk past
 * the signal handler. */
struct frame_record {
	u64 fp;
	u64 lr;
};

/*
 * Kernel-side bookkeeping used while laying out the optional records
 * inside the user signal frame.  All *_offset fields are byte offsets
 * from the sigframe base; an offset of 0 means "record not present".
 */
struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;	/* fpsimd_context record */
	unsigned long esr_offset;	/* esr_context record (fault info) */
	unsigned long sve_offset;	/* sve_context record */
	unsigned long extra_offset;	/* extra_context record */
	unsigned long end_offset;	/* null terminator record */
};
/* Sizes of the fixed-format records, padded to the 16-byte frame alignment. */
#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Initialise @user for a fresh layout pass: size covers the fixed part
 * of rt_sigframe up to __reserved[], and limit is the end of
 * __reserved[] minus space held back for a possible extra_context
 * record and the mandatory null terminator.
 */
static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;

	/* Reserve space for extension and terminator ^ */
}
  80. static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
  81. {
  82. return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
  83. }
/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_64K

/*
 * Reserve @size bytes (padded to 16) in the user signal frame, storing
 * the resulting offset in *@offset.  If the base frame's __reserved[]
 * area is full and @extend is true, an extra_context record is
 * allocated first and the limit is raised to SIGFRAME_MAXSZ so that
 * subsequent records spill into the extra space.
 *
 * Returns 0 on success, -ENOMEM if the record cannot fit.
 */
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	/* Out of room, no extra_context yet, and allowed to extend? */
	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		/* Temporarily un-reserve the extra_context slot so the
		 * recursive (non-extending) call can claim it. */
		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.  May transparently spill into
 * an extra_context area (see __sigframe_alloc()).
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}
/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}
  145. static void __user *apply_user_offset(
  146. struct rt_sigframe_user_layout const *user, unsigned long offset)
  147. {
  148. char __user *base = (char __user *)user->sigframe;
  149. return base + offset;
  150. }
/*
 * Write the current task's FPSIMD state into the user fpsimd_context
 * record at @ctx.  Returns 0 on success or -EFAULT on a failed user
 * access.
 */
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}
/*
 * Load FPSIMD state from the user fpsimd_context record at @ctx into
 * the current task.  Returns 0 on success, -EFAULT on a failed user
 * access, or -EINVAL if the record header is malformed.
 */
static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state fpsimd;
	__u32 magic, size;
	int err = 0;

	/* check the magic/size information */
	__get_user_error(magic, &ctx->head.magic, err);
	__get_user_error(size, &ctx->head.size, err);
	if (err)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

	/* No SVE payload in this record: drop any SVE state. */
	clear_thread_flag(TIF_SVE);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
/* Pointers to the optional records found by parse_user_sigframe();
 * NULL means the record was absent. */
struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	struct sve_context __user *sve;
};
#ifdef CONFIG_ARM64_SVE

/*
 * Write the current task's SVE header (and register payload, when
 * TIF_SVE is set) into the user sve_context record at @ctx.  With
 * TIF_SVE clear only the header is written (vq == 0).
 */
static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = current->thread.sve_vl;
	unsigned int vq = 0;

	if (test_thread_flag(TIF_SVE))
		vq = sve_vq_from_vl(vl);

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling preserve_fpsimd_context().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}
/*
 * Restore SVE and FPSIMD state from the records located by
 * parse_user_sigframe().  A header-only sve_context (size ==
 * sizeof(struct sve_context)) means "discard SVE state, restore FPSIMD
 * only".  Returns 0 on success, -EFAULT on a failed user access, or
 * -EINVAL on a malformed record or vector-length mismatch.
 */
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err;
	unsigned int vq;
	struct user_fpsimd_state fpsimd;
	struct sve_context sve;

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	/* The frame must match the task's current vector length. */
	if (sve.vl != current->thread.sve_vl)
		return -EINVAL;

	if (sve.head.size <= sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(sve.vl);

	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	barrier();
	/* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */

	set_thread_flag(TIF_FOREIGN_FPSTATE);
	barrier();
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current);
	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SVE);

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_sve_context(void __user *ctx);
extern int restore_sve_fpsimd_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SVE */
/*
 * Walk the variable-length record list in the user signal frame at @sf,
 * filling in *@user with pointers to the records found.  All sizes and
 * offsets are validated against the enclosing area so that a crafted
 * frame cannot make the kernel read outside it; the bounds checks use
 * subtraction (limit - offset) so they cannot overflow.  At most one
 * extra_context record is followed.
 *
 * Returns 0 on success, -EFAULT on a failed user access, or -EINVAL for
 * any malformed frame.
 */
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		/* Room for at least a record header? */
		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			/* Null terminator: must have size 0. */
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			/* Reject duplicate or truncated records. */
			if (user->fpsimd)
				goto invalid;

			if (size < sizeof(*user->fpsimd))
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve())
				goto invalid;

			if (user->sve)
				goto invalid;

			if (size < sizeof(*user->sve))
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */
			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			/* Extra data must follow the base frame directly. */
			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(VERIFY_READ, base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
/*
 * Restore the task's signal mask, general-purpose registers, and
 * FP/SVE state from the user signal frame at @sf.  Returns 0 on
 * success or a negative error (caller treats any failure as a bad
 * frame).
 */
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	/* Reject frames carrying an unacceptable pstate. */
	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0) {
		/* An fpsimd_context record is mandatory. */
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve) {
			if (!system_supports_sve())
				return -EINVAL;

			err = restore_sve_fpsimd_context(&user);
		} else {
			err = restore_fpsimd_context(user.fpsimd);
		}
	}

	return err;
}
/*
 * rt_sigreturn syscall: unwind the signal frame that setup_rt_frame()
 * pushed at the current sp.  A bad frame raises SIGSEGV instead of
 * returning an error.
 */
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
	 * be word aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	/* Return the restored x0 so the syscall path doesn't clobber it. */
	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}
/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 *
 * Returns 0 on success or -ENOMEM if the frame would exceed
 * SIGFRAME_MAXSZ.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	/* FPSIMD state is always present. */
	err = sigframe_alloc(user, &user->fpsimd_offset,
			     sizeof(struct fpsimd_context));
	if (err)
		return err;

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE)) {
			int vl = sve_max_vl;

			if (!add_all)
				vl = current->thread.sve_vl;

			vq = sve_vq_from_vl(vl);
		}

		/* vq == 0 yields a header-only sve_context record. */
		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
/*
 * Populate the user signal frame laid out in @user: frame record,
 * general-purpose registers, signal mask, and the optional records
 * placed by setup_sigframe_layout().  Returns 0 on success or a
 * nonzero error from a failed user access.
 */
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state, if present */
	if (system_supports_sve() && err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* Records spilled beyond __reserved[]: emit the extra_context
	 * descriptor plus the dummy terminator that follows it. */
	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}
/*
 * Compute the layout and stack placement of the signal frame (honouring
 * any alternate signal stack) and verify the target area is writable.
 * On success, user->sigframe and user->next_frame point into user
 * memory; returns -EFAULT if the stack cannot be written.
 */
static int get_sigframe(struct rt_sigframe_user_layout *user,
			 struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	/* Frame record sits above the sigframe, 16-byte aligned. */
	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
/*
 * Point the user registers at the signal handler: x0 = signal number,
 * sp/fp at the new frame, pc at the handler, and lr at the sigreturn
 * trampoline (SA_RESTORER override or the vDSO one).
 */
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}
/*
 * Build the complete rt signal frame for a native (64-bit) task and
 * redirect execution to the handler.  Returns nonzero on failure, in
 * which case the caller forces a SIGSEGV via signal_setup_done().
 */
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	/* Make sure the task's FP/SVE state is saved before copying it. */
	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			/* SA_SIGINFO handler args: x1 = siginfo, x2 = ucontext. */
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}
  614. static void setup_restart_syscall(struct pt_regs *regs)
  615. {
  616. if (is_compat_task())
  617. compat_setup_restart_syscall(regs);
  618. else
  619. regs->regs[8] = __NR_restart_syscall;
  620. }
/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/*
	 * Fast forward the stepping logic so we step into the signal
	 * handler.
	 */
	if (!ret)
		user_fastforward_single_step(tsk);

	/* Nonzero ret makes this force a SIGSEGV on the task. */
	signal_setup_done(ret, ksig, 0);
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		/* Back up over the svc instruction (2 bytes in Thumb mode). */
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			regs->regs[0] = -EINTR;
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}
/*
 * Pre-return-to-userspace work loop: handles rescheduling, uprobes,
 * pending signals, notify-resume callbacks and foreign FP state, and
 * repeats until no _TIF_WORK_MASK flags remain.  Called from the
 * ret_to_user assembly path with IRQs masked.
 */
asmlinkage void do_notify_resume(struct pt_regs *regs,
				 unsigned int thread_flags)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();

	do {
		/* Check valid user FS if needed */
		addr_limit_user_check();

		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_SIGPENDING)
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME) {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		/* Re-mask and re-check: the work above may set new flags. */
		local_daif_mask();
		thread_flags = READ_ONCE(current_thread_info()->flags);
	} while (thread_flags & _TIF_WORK_MASK);
}
/* Minimum signal stack size advertised to userspace via AT_MINSIGSTKSZ. */
unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}