/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>
  44. /*
  45. * Do a signal return; undo the signal stack. These are aligned to 128-bit.
  46. */
  47. struct rt_sigframe {
  48. struct siginfo info;
  49. struct ucontext uc;
  50. };
  51. struct frame_record {
  52. u64 fp;
  53. u64 lr;
  54. };
  55. struct rt_sigframe_user_layout {
  56. struct rt_sigframe __user *sigframe;
  57. struct frame_record __user *next_frame;
  58. unsigned long size; /* size of allocated sigframe data */
  59. unsigned long limit; /* largest allowed size */
  60. unsigned long fpsimd_offset;
  61. unsigned long esr_offset;
  62. unsigned long sve_offset;
  63. unsigned long extra_offset;
  64. unsigned long end_offset;
  65. };
  66. #define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
  67. #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
  68. #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
  69. static void init_user_layout(struct rt_sigframe_user_layout *user)
  70. {
  71. const size_t reserved_size =
  72. sizeof(user->sigframe->uc.uc_mcontext.__reserved);
  73. memset(user, 0, sizeof(*user));
  74. user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
  75. user->limit = user->size + reserved_size;
  76. user->limit -= TERMINATOR_SIZE;
  77. user->limit -= EXTRA_CONTEXT_SIZE;
  78. /* Reserve space for extension and terminator ^ */
  79. }
  80. static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
  81. {
  82. return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
  83. }
  84. /*
  85. * Sanity limit on the approximate maximum size of signal frame we'll
  86. * try to generate. Stack alignment padding and the frame record are
  87. * not taken into account. This limit is not a guarantee and is
  88. * NOT ABI.
  89. */
  90. #define SIGFRAME_MAXSZ SZ_64K
  91. static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
  92. unsigned long *offset, size_t size, bool extend)
  93. {
  94. size_t padded_size = round_up(size, 16);
  95. if (padded_size > user->limit - user->size &&
  96. !user->extra_offset &&
  97. extend) {
  98. int ret;
  99. user->limit += EXTRA_CONTEXT_SIZE;
  100. ret = __sigframe_alloc(user, &user->extra_offset,
  101. sizeof(struct extra_context), false);
  102. if (ret) {
  103. user->limit -= EXTRA_CONTEXT_SIZE;
  104. return ret;
  105. }
  106. /* Reserve space for the __reserved[] terminator */
  107. user->size += TERMINATOR_SIZE;
  108. /*
  109. * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
  110. * the terminator:
  111. */
  112. user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
  113. }
  114. /* Still not enough space? Bad luck! */
  115. if (padded_size > user->limit - user->size)
  116. return -ENOMEM;
  117. *offset = user->size;
  118. user->size += padded_size;
  119. return 0;
  120. }
  121. /*
  122. * Allocate space for an optional record of <size> bytes in the user
  123. * signal frame. The offset from the signal frame base address to the
  124. * allocated block is assigned to *offset.
  125. */
  126. static int sigframe_alloc(struct rt_sigframe_user_layout *user,
  127. unsigned long *offset, size_t size)
  128. {
  129. return __sigframe_alloc(user, offset, size, true);
  130. }
  131. /* Allocate the null terminator record and prevent further allocations */
  132. static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
  133. {
  134. int ret;
  135. /* Un-reserve the space reserved for the terminator: */
  136. user->limit += TERMINATOR_SIZE;
  137. ret = sigframe_alloc(user, &user->end_offset,
  138. sizeof(struct _aarch64_ctx));
  139. if (ret)
  140. return ret;
  141. /* Prevent further allocation: */
  142. user->limit = user->size;
  143. return 0;
  144. }
  145. static void __user *apply_user_offset(
  146. struct rt_sigframe_user_layout const *user, unsigned long offset)
  147. {
  148. char __user *base = (char __user *)user->sigframe;
  149. return base + offset;
  150. }
  151. static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
  152. {
  153. struct user_fpsimd_state const *fpsimd =
  154. &current->thread.uw.fpsimd_state;
  155. int err;
  156. /* copy the FP and status/control registers */
  157. err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
  158. __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
  159. __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);
  160. /* copy the magic/size information */
  161. __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
  162. __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);
  163. return err ? -EFAULT : 0;
  164. }
  165. static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
  166. {
  167. struct user_fpsimd_state fpsimd;
  168. __u32 magic, size;
  169. int err = 0;
  170. /* check the magic/size information */
  171. __get_user_error(magic, &ctx->head.magic, err);
  172. __get_user_error(size, &ctx->head.size, err);
  173. if (err)
  174. return -EFAULT;
  175. if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
  176. return -EINVAL;
  177. /* copy the FP and status/control registers */
  178. err = __copy_from_user(fpsimd.vregs, ctx->vregs,
  179. sizeof(fpsimd.vregs));
  180. __get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
  181. __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
  182. clear_thread_flag(TIF_SVE);
  183. /* load the hardware registers from the fpsimd_state structure */
  184. if (!err)
  185. fpsimd_update_current_state(&fpsimd);
  186. return err ? -EFAULT : 0;
  187. }
  188. struct user_ctxs {
  189. struct fpsimd_context __user *fpsimd;
  190. struct sve_context __user *sve;
  191. };
  192. #ifdef CONFIG_ARM64_SVE
  193. static int preserve_sve_context(struct sve_context __user *ctx)
  194. {
  195. int err = 0;
  196. u16 reserved[ARRAY_SIZE(ctx->__reserved)];
  197. unsigned int vl = current->thread.sve_vl;
  198. unsigned int vq = 0;
  199. if (test_thread_flag(TIF_SVE))
  200. vq = sve_vq_from_vl(vl);
  201. memset(reserved, 0, sizeof(reserved));
  202. __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
  203. __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
  204. &ctx->head.size, err);
  205. __put_user_error(vl, &ctx->vl, err);
  206. BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
  207. err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
  208. if (vq) {
  209. /*
  210. * This assumes that the SVE state has already been saved to
  211. * the task struct by calling preserve_fpsimd_context().
  212. */
  213. err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
  214. current->thread.sve_state,
  215. SVE_SIG_REGS_SIZE(vq));
  216. }
  217. return err ? -EFAULT : 0;
  218. }
  219. static int restore_sve_fpsimd_context(struct user_ctxs *user)
  220. {
  221. int err;
  222. unsigned int vq;
  223. struct user_fpsimd_state fpsimd;
  224. struct sve_context sve;
  225. if (__copy_from_user(&sve, user->sve, sizeof(sve)))
  226. return -EFAULT;
  227. if (sve.vl != current->thread.sve_vl)
  228. return -EINVAL;
  229. if (sve.head.size <= sizeof(*user->sve)) {
  230. clear_thread_flag(TIF_SVE);
  231. goto fpsimd_only;
  232. }
  233. vq = sve_vq_from_vl(sve.vl);
  234. if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
  235. return -EINVAL;
  236. /*
  237. * Careful: we are about __copy_from_user() directly into
  238. * thread.sve_state with preemption enabled, so protection is
  239. * needed to prevent a racing context switch from writing stale
  240. * registers back over the new data.
  241. */
  242. fpsimd_flush_task_state(current);
  243. barrier();
  244. /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */
  245. set_thread_flag(TIF_FOREIGN_FPSTATE);
  246. barrier();
  247. /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
  248. sve_alloc(current);
  249. err = __copy_from_user(current->thread.sve_state,
  250. (char __user const *)user->sve +
  251. SVE_SIG_REGS_OFFSET,
  252. SVE_SIG_REGS_SIZE(vq));
  253. if (err)
  254. return -EFAULT;
  255. set_thread_flag(TIF_SVE);
  256. fpsimd_only:
  257. /* copy the FP and status/control registers */
  258. /* restore_sigframe() already checked that user->fpsimd != NULL. */
  259. err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
  260. sizeof(fpsimd.vregs));
  261. __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
  262. __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
  263. /* load the hardware registers from the fpsimd_state structure */
  264. if (!err)
  265. fpsimd_update_current_state(&fpsimd);
  266. return err ? -EFAULT : 0;
  267. }
  268. #else /* ! CONFIG_ARM64_SVE */
  269. /* Turn any non-optimised out attempts to use these into a link error: */
  270. extern int preserve_sve_context(void __user *ctx);
  271. extern int restore_sve_fpsimd_context(struct user_ctxs *user);
  272. #endif /* ! CONFIG_ARM64_SVE */
  273. static int parse_user_sigframe(struct user_ctxs *user,
  274. struct rt_sigframe __user *sf)
  275. {
  276. struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
  277. struct _aarch64_ctx __user *head;
  278. char __user *base = (char __user *)&sc->__reserved;
  279. size_t offset = 0;
  280. size_t limit = sizeof(sc->__reserved);
  281. bool have_extra_context = false;
  282. char const __user *const sfp = (char const __user *)sf;
  283. user->fpsimd = NULL;
  284. user->sve = NULL;
  285. if (!IS_ALIGNED((unsigned long)base, 16))
  286. goto invalid;
  287. while (1) {
  288. int err = 0;
  289. u32 magic, size;
  290. char const __user *userp;
  291. struct extra_context const __user *extra;
  292. u64 extra_datap;
  293. u32 extra_size;
  294. struct _aarch64_ctx const __user *end;
  295. u32 end_magic, end_size;
  296. if (limit - offset < sizeof(*head))
  297. goto invalid;
  298. if (!IS_ALIGNED(offset, 16))
  299. goto invalid;
  300. head = (struct _aarch64_ctx __user *)(base + offset);
  301. __get_user_error(magic, &head->magic, err);
  302. __get_user_error(size, &head->size, err);
  303. if (err)
  304. return err;
  305. if (limit - offset < size)
  306. goto invalid;
  307. switch (magic) {
  308. case 0:
  309. if (size)
  310. goto invalid;
  311. goto done;
  312. case FPSIMD_MAGIC:
  313. if (user->fpsimd)
  314. goto invalid;
  315. if (size < sizeof(*user->fpsimd))
  316. goto invalid;
  317. user->fpsimd = (struct fpsimd_context __user *)head;
  318. break;
  319. case ESR_MAGIC:
  320. /* ignore */
  321. break;
  322. case SVE_MAGIC:
  323. if (!system_supports_sve())
  324. goto invalid;
  325. if (user->sve)
  326. goto invalid;
  327. if (size < sizeof(*user->sve))
  328. goto invalid;
  329. user->sve = (struct sve_context __user *)head;
  330. break;
  331. case EXTRA_MAGIC:
  332. if (have_extra_context)
  333. goto invalid;
  334. if (size < sizeof(*extra))
  335. goto invalid;
  336. userp = (char const __user *)head;
  337. extra = (struct extra_context const __user *)userp;
  338. userp += size;
  339. __get_user_error(extra_datap, &extra->datap, err);
  340. __get_user_error(extra_size, &extra->size, err);
  341. if (err)
  342. return err;
  343. /* Check for the dummy terminator in __reserved[]: */
  344. if (limit - offset - size < TERMINATOR_SIZE)
  345. goto invalid;
  346. end = (struct _aarch64_ctx const __user *)userp;
  347. userp += TERMINATOR_SIZE;
  348. __get_user_error(end_magic, &end->magic, err);
  349. __get_user_error(end_size, &end->size, err);
  350. if (err)
  351. return err;
  352. if (end_magic || end_size)
  353. goto invalid;
  354. /* Prevent looping/repeated parsing of extra_context */
  355. have_extra_context = true;
  356. base = (__force void __user *)extra_datap;
  357. if (!IS_ALIGNED((unsigned long)base, 16))
  358. goto invalid;
  359. if (!IS_ALIGNED(extra_size, 16))
  360. goto invalid;
  361. if (base != userp)
  362. goto invalid;
  363. /* Reject "unreasonably large" frames: */
  364. if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
  365. goto invalid;
  366. /*
  367. * Ignore trailing terminator in __reserved[]
  368. * and start parsing extra data:
  369. */
  370. offset = 0;
  371. limit = extra_size;
  372. if (!access_ok(VERIFY_READ, base, limit))
  373. goto invalid;
  374. continue;
  375. default:
  376. goto invalid;
  377. }
  378. if (size < sizeof(*head))
  379. goto invalid;
  380. if (limit - offset < size)
  381. goto invalid;
  382. offset += size;
  383. }
  384. done:
  385. return 0;
  386. invalid:
  387. return -EINVAL;
  388. }
  389. static int restore_sigframe(struct pt_regs *regs,
  390. struct rt_sigframe __user *sf)
  391. {
  392. sigset_t set;
  393. int i, err;
  394. struct user_ctxs user;
  395. err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
  396. if (err == 0)
  397. set_current_blocked(&set);
  398. for (i = 0; i < 31; i++)
  399. __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
  400. err);
  401. __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
  402. __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
  403. __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
  404. /*
  405. * Avoid sys_rt_sigreturn() restarting.
  406. */
  407. forget_syscall(regs);
  408. err |= !valid_user_regs(&regs->user_regs, current);
  409. if (err == 0)
  410. err = parse_user_sigframe(&user, sf);
  411. if (err == 0) {
  412. if (!user.fpsimd)
  413. return -EINVAL;
  414. if (user.sve) {
  415. if (!system_supports_sve())
  416. return -EINVAL;
  417. err = restore_sve_fpsimd_context(&user);
  418. } else {
  419. err = restore_fpsimd_context(user.fpsimd);
  420. }
  421. }
  422. return err;
  423. }
  424. SYSCALL_DEFINE0(rt_sigreturn)
  425. {
  426. struct pt_regs *regs = current_pt_regs();
  427. struct rt_sigframe __user *frame;
  428. /* Always make any pending restarted system calls return -EINTR */
  429. current->restart_block.fn = do_no_restart_syscall;
  430. /*
  431. * Since we stacked the signal on a 128-bit boundary, then 'sp' should
  432. * be word aligned here.
  433. */
  434. if (regs->sp & 15)
  435. goto badframe;
  436. frame = (struct rt_sigframe __user *)regs->sp;
  437. if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
  438. goto badframe;
  439. if (restore_sigframe(regs, frame))
  440. goto badframe;
  441. if (restore_altstack(&frame->uc.uc_stack))
  442. goto badframe;
  443. return regs->regs[0];
  444. badframe:
  445. arm64_notify_segfault(regs->sp);
  446. return 0;
  447. }
  448. /*
  449. * Determine the layout of optional records in the signal frame
  450. *
  451. * add_all: if true, lays out the biggest possible signal frame for
  452. * this task; otherwise, generates a layout for the current state
  453. * of the task.
  454. */
  455. static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
  456. bool add_all)
  457. {
  458. int err;
  459. err = sigframe_alloc(user, &user->fpsimd_offset,
  460. sizeof(struct fpsimd_context));
  461. if (err)
  462. return err;
  463. /* fault information, if valid */
  464. if (add_all || current->thread.fault_code) {
  465. err = sigframe_alloc(user, &user->esr_offset,
  466. sizeof(struct esr_context));
  467. if (err)
  468. return err;
  469. }
  470. if (system_supports_sve()) {
  471. unsigned int vq = 0;
  472. if (add_all || test_thread_flag(TIF_SVE)) {
  473. int vl = sve_max_vl;
  474. if (!add_all)
  475. vl = current->thread.sve_vl;
  476. vq = sve_vq_from_vl(vl);
  477. }
  478. err = sigframe_alloc(user, &user->sve_offset,
  479. SVE_SIG_CONTEXT_SIZE(vq));
  480. if (err)
  481. return err;
  482. }
  483. return sigframe_alloc_end(user);
  484. }
  485. static int setup_sigframe(struct rt_sigframe_user_layout *user,
  486. struct pt_regs *regs, sigset_t *set)
  487. {
  488. int i, err = 0;
  489. struct rt_sigframe __user *sf = user->sigframe;
  490. /* set up the stack frame for unwinding */
  491. __put_user_error(regs->regs[29], &user->next_frame->fp, err);
  492. __put_user_error(regs->regs[30], &user->next_frame->lr, err);
  493. for (i = 0; i < 31; i++)
  494. __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
  495. err);
  496. __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
  497. __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
  498. __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
  499. __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
  500. err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
  501. if (err == 0) {
  502. struct fpsimd_context __user *fpsimd_ctx =
  503. apply_user_offset(user, user->fpsimd_offset);
  504. err |= preserve_fpsimd_context(fpsimd_ctx);
  505. }
  506. /* fault information, if valid */
  507. if (err == 0 && user->esr_offset) {
  508. struct esr_context __user *esr_ctx =
  509. apply_user_offset(user, user->esr_offset);
  510. __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
  511. __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
  512. __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
  513. }
  514. /* Scalable Vector Extension state, if present */
  515. if (system_supports_sve() && err == 0 && user->sve_offset) {
  516. struct sve_context __user *sve_ctx =
  517. apply_user_offset(user, user->sve_offset);
  518. err |= preserve_sve_context(sve_ctx);
  519. }
  520. if (err == 0 && user->extra_offset) {
  521. char __user *sfp = (char __user *)user->sigframe;
  522. char __user *userp =
  523. apply_user_offset(user, user->extra_offset);
  524. struct extra_context __user *extra;
  525. struct _aarch64_ctx __user *end;
  526. u64 extra_datap;
  527. u32 extra_size;
  528. extra = (struct extra_context __user *)userp;
  529. userp += EXTRA_CONTEXT_SIZE;
  530. end = (struct _aarch64_ctx __user *)userp;
  531. userp += TERMINATOR_SIZE;
  532. /*
  533. * extra_datap is just written to the signal frame.
  534. * The value gets cast back to a void __user *
  535. * during sigreturn.
  536. */
  537. extra_datap = (__force u64)userp;
  538. extra_size = sfp + round_up(user->size, 16) - userp;
  539. __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
  540. __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
  541. __put_user_error(extra_datap, &extra->datap, err);
  542. __put_user_error(extra_size, &extra->size, err);
  543. /* Add the terminator */
  544. __put_user_error(0, &end->magic, err);
  545. __put_user_error(0, &end->size, err);
  546. }
  547. /* set the "end" magic */
  548. if (err == 0) {
  549. struct _aarch64_ctx __user *end =
  550. apply_user_offset(user, user->end_offset);
  551. __put_user_error(0, &end->magic, err);
  552. __put_user_error(0, &end->size, err);
  553. }
  554. return err;
  555. }
  556. static int get_sigframe(struct rt_sigframe_user_layout *user,
  557. struct ksignal *ksig, struct pt_regs *regs)
  558. {
  559. unsigned long sp, sp_top;
  560. int err;
  561. init_user_layout(user);
  562. err = setup_sigframe_layout(user, false);
  563. if (err)
  564. return err;
  565. sp = sp_top = sigsp(regs->sp, ksig);
  566. sp = round_down(sp - sizeof(struct frame_record), 16);
  567. user->next_frame = (struct frame_record __user *)sp;
  568. sp = round_down(sp, 16) - sigframe_size(user);
  569. user->sigframe = (struct rt_sigframe __user *)sp;
  570. /*
  571. * Check that we can actually write to the signal frame.
  572. */
  573. if (!access_ok(VERIFY_WRITE, user->sigframe, sp_top - sp))
  574. return -EFAULT;
  575. return 0;
  576. }
  577. static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
  578. struct rt_sigframe_user_layout *user, int usig)
  579. {
  580. __sigrestore_t sigtramp;
  581. regs->regs[0] = usig;
  582. regs->sp = (unsigned long)user->sigframe;
  583. regs->regs[29] = (unsigned long)&user->next_frame->fp;
  584. regs->pc = (unsigned long)ka->sa.sa_handler;
  585. if (ka->sa.sa_flags & SA_RESTORER)
  586. sigtramp = ka->sa.sa_restorer;
  587. else
  588. sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
  589. regs->regs[30] = (unsigned long)sigtramp;
  590. }
  591. static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
  592. struct pt_regs *regs)
  593. {
  594. struct rt_sigframe_user_layout user;
  595. struct rt_sigframe __user *frame;
  596. int err = 0;
  597. fpsimd_signal_preserve_current_state();
  598. if (get_sigframe(&user, ksig, regs))
  599. return 1;
  600. frame = user.sigframe;
  601. __put_user_error(0, &frame->uc.uc_flags, err);
  602. __put_user_error(NULL, &frame->uc.uc_link, err);
  603. err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
  604. err |= setup_sigframe(&user, regs, set);
  605. if (err == 0) {
  606. setup_return(regs, &ksig->ka, &user, usig);
  607. if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
  608. err |= copy_siginfo_to_user(&frame->info, &ksig->info);
  609. regs->regs[1] = (unsigned long)&frame->info;
  610. regs->regs[2] = (unsigned long)&frame->uc;
  611. }
  612. }
  613. return err;
  614. }
  615. static void setup_restart_syscall(struct pt_regs *regs)
  616. {
  617. if (is_compat_task())
  618. compat_setup_restart_syscall(regs);
  619. else
  620. regs->regs[8] = __NR_restart_syscall;
  621. }
  622. /*
  623. * OK, we're invoking a handler
  624. */
  625. static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
  626. {
  627. struct task_struct *tsk = current;
  628. sigset_t *oldset = sigmask_to_save();
  629. int usig = ksig->sig;
  630. int ret;
  631. rseq_signal_deliver(ksig, regs);
  632. /*
  633. * Set up the stack frame
  634. */
  635. if (is_compat_task()) {
  636. if (ksig->ka.sa.sa_flags & SA_SIGINFO)
  637. ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
  638. else
  639. ret = compat_setup_frame(usig, ksig, oldset, regs);
  640. } else {
  641. ret = setup_rt_frame(usig, ksig, oldset, regs);
  642. }
  643. /*
  644. * Check that the resulting registers are actually sane.
  645. */
  646. ret |= !valid_user_regs(&regs->user_regs, current);
  647. /*
  648. * Fast forward the stepping logic so we step into the signal
  649. * handler.
  650. */
  651. if (!ret)
  652. user_fastforward_single_step(tsk);
  653. signal_setup_done(ret, ksig, 0);
  654. }
  655. /*
  656. * Note that 'init' is a special process: it doesn't get signals it doesn't
  657. * want to handle. Thus you cannot kill init even with a SIGKILL even by
  658. * mistake.
  659. *
  660. * Note that we go through the signals twice: once to check the signals that
  661. * the kernel can handle, and then we build all the user-level signal handling
  662. * stack-frames in one go after that.
  663. */
  664. static void do_signal(struct pt_regs *regs)
  665. {
  666. unsigned long continue_addr = 0, restart_addr = 0;
  667. int retval = 0;
  668. struct ksignal ksig;
  669. bool syscall = in_syscall(regs);
  670. /*
  671. * If we were from a system call, check for system call restarting...
  672. */
  673. if (syscall) {
  674. continue_addr = regs->pc;
  675. restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
  676. retval = regs->regs[0];
  677. /*
  678. * Avoid additional syscall restarting via ret_to_user.
  679. */
  680. forget_syscall(regs);
  681. /*
  682. * Prepare for system call restart. We do this here so that a
  683. * debugger will see the already changed PC.
  684. */
  685. switch (retval) {
  686. case -ERESTARTNOHAND:
  687. case -ERESTARTSYS:
  688. case -ERESTARTNOINTR:
  689. case -ERESTART_RESTARTBLOCK:
  690. regs->regs[0] = regs->orig_x0;
  691. regs->pc = restart_addr;
  692. break;
  693. }
  694. }
  695. /*
  696. * Get the signal to deliver. When running under ptrace, at this point
  697. * the debugger may change all of our registers.
  698. */
  699. if (get_signal(&ksig)) {
  700. /*
  701. * Depending on the signal settings, we may need to revert the
  702. * decision to restart the system call, but skip this if a
  703. * debugger has chosen to restart at a different PC.
  704. */
  705. if (regs->pc == restart_addr &&
  706. (retval == -ERESTARTNOHAND ||
  707. retval == -ERESTART_RESTARTBLOCK ||
  708. (retval == -ERESTARTSYS &&
  709. !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
  710. regs->regs[0] = -EINTR;
  711. regs->pc = continue_addr;
  712. }
  713. handle_signal(&ksig, regs);
  714. return;
  715. }
  716. /*
  717. * Handle restarting a different system call. As above, if a debugger
  718. * has chosen to restart at a different PC, ignore the restart.
  719. */
  720. if (syscall && regs->pc == restart_addr) {
  721. if (retval == -ERESTART_RESTARTBLOCK)
  722. setup_restart_syscall(regs);
  723. user_rewind_single_step(current);
  724. }
  725. restore_saved_sigmask();
  726. }
  727. asmlinkage void do_notify_resume(struct pt_regs *regs,
  728. unsigned long thread_flags)
  729. {
  730. /*
  731. * The assembly code enters us with IRQs off, but it hasn't
  732. * informed the tracing code of that for efficiency reasons.
  733. * Update the trace code with the current status.
  734. */
  735. trace_hardirqs_off();
  736. do {
  737. /* Check valid user FS if needed */
  738. addr_limit_user_check();
  739. if (thread_flags & _TIF_NEED_RESCHED) {
  740. /* Unmask Debug and SError for the next task */
  741. local_daif_restore(DAIF_PROCCTX_NOIRQ);
  742. schedule();
  743. } else {
  744. local_daif_restore(DAIF_PROCCTX);
  745. if (thread_flags & _TIF_UPROBE)
  746. uprobe_notify_resume(regs);
  747. if (thread_flags & _TIF_SIGPENDING)
  748. do_signal(regs);
  749. if (thread_flags & _TIF_NOTIFY_RESUME) {
  750. clear_thread_flag(TIF_NOTIFY_RESUME);
  751. tracehook_notify_resume(regs);
  752. rseq_handle_notify_resume(NULL, regs);
  753. }
  754. if (thread_flags & _TIF_FOREIGN_FPSTATE)
  755. fpsimd_restore_current_state();
  756. }
  757. local_daif_mask();
  758. thread_flags = READ_ONCE(current_thread_info()->flags);
  759. } while (thread_flags & _TIF_WORK_MASK);
  760. }
  761. unsigned long __ro_after_init signal_minsigstksz;
  762. /*
  763. * Determine the stack space required for guaranteed signal devliery.
  764. * This function is used to populate AT_MINSIGSTKSZ at process startup.
  765. * cpufeatures setup is assumed to be complete.
  766. */
  767. void __init minsigstksz_setup(void)
  768. {
  769. struct rt_sigframe_user_layout user;
  770. init_user_layout(&user);
  771. /*
  772. * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
  773. * be big enough, but it's our best guess:
  774. */
  775. if (WARN_ON(setup_sigframe_layout(&user, true)))
  776. return;
  777. signal_minsigstksz = sigframe_size(&user) +
  778. round_up(sizeof(struct frame_record), 16) +
  779. 16; /* max alignment padding */
  780. }