// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *
 *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
 *  2000-2002   x86-64 support by Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/context_tracking.h>
#include <linux/syscalls.h>

#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/vdso.h>
#include <asm/mce.h>
#include <asm/sighandling.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#endif /* CONFIG_X86_64 */

#include <asm/syscall.h>
#include <asm/syscalls.h>

#include <asm/sigframe.h>
#include <asm/signal.h>
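
/*
 * Helpers for restore_sigcontext(): each one runs inside a get_user_try
 * section and copies a single field of the user-space sigcontext into
 * the current pt_regs.
 */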
#define COPY(x)			do {			\
	get_user_ex(regs->x, &sc->x);			\
} while (0)

#define GET_SEG(seg)		({			\
	unsigned short tmp;				\
	get_user_ex(tmp, &sc->seg);			\
	tmp;						\
})

#define COPY_SEG(seg)		do {			\
	regs->seg = GET_SEG(seg);			\
} while (0)

#define COPY_SEG_CPL3(seg)	do {			\
	regs->seg = GET_SEG(seg) | 3;			\
} while (0)

#ifdef CONFIG_X86_64
/*
 * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
 * alone.  Using this generally makes no sense unless
 * user_64bit_mode(regs) would return true.
 */
static void force_valid_ss(struct pt_regs *regs)
{
	u32 ar;

	asm volatile ("lar %[old_ss], %[ar]\n\t"
		      "jz 1f\n\t"		/* If invalid: */
		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
		      "1:"
		      : [ar] "=r" (ar)
		      : [old_ss] "rm" ((u16)regs->ss));

	/*
	 * For a valid 64-bit user context, we need DPL 3, type
	 * read-write data or read-write exp-down data, and S and P
	 * set.  We can't use VERW because VERW doesn't check the
	 * P bit.
	 */
	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
		regs->ss = __USER_DS;
}
#endif
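
/*
 * Copy the register state saved in @sc back into @regs on sigreturn, then
 * hand the saved FPU area to fpu__restore_sig().
 */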
static int restore_sigcontext(struct pt_regs *regs,
			      struct sigcontext __user *sc,
			      unsigned long uc_flags)
{
	unsigned long buf_val;
	void __user *buf;
	unsigned int tmpflags;
	unsigned int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	get_user_try {

#ifdef CONFIG_X86_32
		set_user_gs(regs, GET_SEG(gs));
		COPY_SEG(fs);
		COPY_SEG(es);
		COPY_SEG(ds);
#endif /* CONFIG_X86_32 */

		COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
		COPY(dx); COPY(cx); COPY(ip); COPY(ax);

#ifdef CONFIG_X86_64
		COPY(r8);
		COPY(r9);
		COPY(r10);
		COPY(r11);
		COPY(r12);
		COPY(r13);
		COPY(r14);
		COPY(r15);
#endif /* CONFIG_X86_64 */

		COPY_SEG_CPL3(cs);
		COPY_SEG_CPL3(ss);

#ifdef CONFIG_X86_64
		/*
		 * Fix up SS if needed for the benefit of old DOSEMU and
		 * CRIU.
		 */
		if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
			     user_64bit_mode(regs)))
			force_valid_ss(regs);
#endif

		get_user_ex(tmpflags, &sc->flags);
		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
		regs->orig_ax = -1;		/* disable syscall checks */

		get_user_ex(buf_val, &sc->fpstate);
		buf = (void __user *)buf_val;
	} get_user_catch(err);

	err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));

	force_iret();

	return err;
}
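
/*
 * Fill in the user-space sigcontext at @sc from @regs, the current trap
 * information, and the blocked signal mask.
 */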
int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
		     struct pt_regs *regs, unsigned long mask)
{
	int err = 0;

	put_user_try {

#ifdef CONFIG_X86_32
		put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
		put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
		put_user_ex(regs->es, (unsigned int __user *)&sc->es);
		put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
#endif /* CONFIG_X86_32 */

		put_user_ex(regs->di, &sc->di);
		put_user_ex(regs->si, &sc->si);
		put_user_ex(regs->bp, &sc->bp);
		put_user_ex(regs->sp, &sc->sp);
		put_user_ex(regs->bx, &sc->bx);
		put_user_ex(regs->dx, &sc->dx);
		put_user_ex(regs->cx, &sc->cx);
		put_user_ex(regs->ax, &sc->ax);
#ifdef CONFIG_X86_64
		put_user_ex(regs->r8, &sc->r8);
		put_user_ex(regs->r9, &sc->r9);
		put_user_ex(regs->r10, &sc->r10);
		put_user_ex(regs->r11, &sc->r11);
		put_user_ex(regs->r12, &sc->r12);
		put_user_ex(regs->r13, &sc->r13);
		put_user_ex(regs->r14, &sc->r14);
		put_user_ex(regs->r15, &sc->r15);
#endif /* CONFIG_X86_64 */

		put_user_ex(current->thread.trap_nr, &sc->trapno);
		put_user_ex(current->thread.error_code, &sc->err);
		put_user_ex(regs->ip, &sc->ip);
#ifdef CONFIG_X86_32
		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
		put_user_ex(regs->flags, &sc->flags);
		put_user_ex(regs->sp, &sc->sp_at_signal);
		put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
#else /* !CONFIG_X86_32 */
		put_user_ex(regs->flags, &sc->flags);
		put_user_ex(regs->cs, &sc->cs);
		put_user_ex(0, &sc->gs);
		put_user_ex(0, &sc->fs);
		put_user_ex(regs->ss, &sc->ss);
#endif /* CONFIG_X86_32 */

		put_user_ex(fpstate, &sc->fpstate);

		/* non-iBCS2 extensions.. */
		put_user_ex(mask, &sc->oldmask);
		put_user_ex(current->thread.cr2, &sc->cr2);
	} put_user_catch(err);

	return err;
}

/*
 * Set up a signal frame.
 */

/*
 * Determine which stack to use..
 */
static unsigned long align_sigframe(unsigned long sp)
{
#ifdef CONFIG_X86_32
	/*
	 * Align the stack pointer according to the i386 ABI,
	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
	 */
	sp = ((sp + 4) & -16ul) - 4;
#else /* !CONFIG_X86_32 */
	sp = round_down(sp, 16) - 8;
#endif
	return sp;
}
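
/*
 * Pick the user stack for the signal frame: skip the 64-bit red zone, honour
 * SA_ONSTACK, reserve room for the FPU state, and align the result via
 * align_sigframe() (for example, align_sigframe(0xbffff123) == 0xbffff11c on
 * 32-bit, so sp + 4 is 16-byte aligned on handler entry).
 */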
static void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	/* Default to using normal stack */
	unsigned long math_size = 0;
	unsigned long sp = regs->sp;
	unsigned long buf_fx = 0;
	int onsigstack = on_sig_stack(sp);
	struct fpu *fpu = &current->thread.fpu;

	/* redzone */
	if (IS_ENABLED(CONFIG_X86_64))
		sp -= 128;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (sas_ss_flags(sp) == 0)
			sp = current->sas_ss_sp + current->sas_ss_size;
	} else if (IS_ENABLED(CONFIG_X86_32) &&
		   !onsigstack &&
		   regs->ss != __USER_DS &&
		   !(ka->sa.sa_flags & SA_RESTORER) &&
		   ka->sa.sa_restorer) {
		/* This is the legacy signal stack switching. */
		sp = (unsigned long) ka->sa.sa_restorer;
	}

	if (fpu->initialized) {
		sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
					  &buf_fx, &math_size);
		*fpstate = (void __user *)sp;
	}

	sp = align_sigframe(sp - frame_size);

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (onsigstack && !likely(on_sig_stack(sp)))
		return (void __user *)-1L;

	/* save i387 and extended state */
	if (fpu->initialized &&
	    copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
		return (void __user *)-1L;

	return (void __user *)sp;
}
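
/*
 * Legacy int $0x80 sigreturn stubs copied onto the 32-bit frames below; see
 * the comments in __setup_frame() and __setup_rt_frame() for why they are
 * still written out even though the vDSO restorer is normally used.
 */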
#ifdef CONFIG_X86_32
static const struct {
	u16 poplmovl;
	u32 val;
	u16 int80;
} __attribute__((packed)) retcode = {
	0xb858,		/* popl %eax; movl $..., %eax */
	__NR_sigreturn,
	0x80cd,		/* int $0x80 */
};

static const struct {
	u8  movl;
	u32 val;
	u16 int80;
	u8  pad;
} __attribute__((packed)) rt_retcode = {
	0xb8,		/* movl $..., %eax */
	__NR_rt_sigreturn,
	0x80cd,		/* int $0x80 */
	0
};
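
/*
 * Build the classic (non-RT) 32-bit signal frame on the user stack and
 * redirect the registers so that the task enters the handler on its return
 * to user space.
 */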
static int
__setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
	      struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (__put_user(sig, &frame->sig))
		return -EFAULT;

	if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
		return -EFAULT;

	if (_NSIG_WORDS > 1) {
		if (__copy_to_user(&frame->extramask, &set->sig[1],
				   sizeof(frame->extramask)))
			return -EFAULT;
	}

	if (current->mm->context.vdso)
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_sigreturn;
	else
		restorer = &frame->retcode;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;

	/* Set up to return from userspace.  */
	err |= __put_user(restorer, &frame->pretcode);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
}
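
/*
 * Build the 32-bit RT signal frame (siginfo plus ucontext) on the user
 * stack.
 */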
static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(&frame->info, &frame->pinfo);
		put_user_ex(&frame->uc, &frame->puc);

		/* Create the ucontext.  */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		save_altstack_ex(&frame->uc.uc_stack, regs->sp);

		/* Set up to return from userspace.  */
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_rt_sigreturn;
		if (ksig->ka.sa.sa_flags & SA_RESTORER)
			restorer = ksig->ka.sa.sa_restorer;
		put_user_ex(restorer, &frame->pretcode);

		/*
		 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
		 *
		 * WE DO NOT USE IT ANY MORE! It's only left here for historical
		 * reasons and because gdb uses it as a signature to notice
		 * signal handler stack frames.
		 */
		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
	} put_user_catch(err);

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
}
#else /* !CONFIG_X86_32 */
static unsigned long frame_uc_flags(struct pt_regs *regs)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_XSAVE))
		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
	else
		flags = UC_SIGCONTEXT_SS;

	if (likely(user_64bit_mode(regs)))
		flags |= UC_STRICT_RESTORE_SS;

	return flags;
}
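
/*
 * Build the 64-bit RT signal frame.  x86-64 handlers are expected to supply
 * SA_RESTORER, so no kernel-provided trampoline is written here.
 */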
static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	int err = 0;

	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	put_user_try {
		/* Create the ucontext.  */
		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		save_altstack_ex(&frame->uc.uc_stack, regs->sp);

		/* Set up to return from userspace.  If provided, use a stub
		   already in userspace.  */
		/* x86-64 should always use SA_RESTORER. */
		if (ksig->ka.sa.sa_flags & SA_RESTORER) {
			put_user_ex(ksig->ka.sa.sa_restorer, &frame->pretcode);
		} else {
			/* could use a vstub here */
			err |= -EFAULT;
		}
	} put_user_catch(err);

	err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/* This also works for non SA_SIGINFO handlers because they expect the
	   next argument after the signal number on the stack. */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/*
	 * Set up the CS and SS registers to run signal handlers in
	 * 64-bit mode, even if the handler happens to be interrupting
	 * 32-bit or 16-bit code.
	 *
	 * SS is subtle.  In 64-bit mode, we don't need any particular
	 * SS descriptor, but we do need SS to be valid.  It's possible
	 * that the old SS is entirely bogus -- this can happen if the
	 * signal we're trying to deliver is #GP or #SS caused by a bad
	 * SS value.  We also have a compatibility issue here: DOSEMU
	 * relies on the contents of the SS register indicating the
	 * SS value at the time of the signal, even though that code in
	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
	 * avoids relying on sigreturn to restore SS; instead it uses
	 * a trampoline.)  So we do our best: if the old SS was valid,
	 * we keep it.  Otherwise we replace it.
	 */
	regs->cs = __USER_CS;

	if (unlikely(regs->ss != __USER_DS))
		force_valid_ss(regs);

	return 0;
}
#endif /* CONFIG_X86_32 */
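
/*
 * Build the x32 RT signal frame: 64-bit register state combined with the
 * compat layouts for siginfo and the alternate signal stack.
 */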
static int x32_setup_rt_frame(struct ksignal *ksig,
			      compat_sigset_t *set,
			      struct pt_regs *regs)
{
#ifdef CONFIG_X86_X32_ABI
	struct rt_sigframe_x32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (__copy_siginfo_to_user32(&frame->info, &ksig->info, true))
			return -EFAULT;
	}

	put_user_try {
		/* Create the ucontext.  */
		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
		put_user_ex(0, &frame->uc.uc__pad0);

		if (ksig->ka.sa.sa_flags & SA_RESTORER) {
			restorer = ksig->ka.sa.sa_restorer;
		} else {
			/* could use a vstub here */
			restorer = NULL;
			err |= -EFAULT;
		}
		put_user_ex(restorer, &frame->pretcode);
	} put_user_catch(err);

	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* We use the x32 calling convention here... */
	regs->di = ksig->sig;
	regs->si = (unsigned long) &frame->info;
	regs->dx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
#endif	/* CONFIG_X86_X32_ABI */

	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_X86_32
SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *frame;
	sigset_t set;

	frame = (struct sigframe __user *)(regs->sp - 8);

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	set_current_blocked(&set);

	/*
	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
	 * Save a few cycles by skipping the __get_user.
	 */
	if (restore_sigcontext(regs, &frame->sc, 0))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "sigreturn");

	return 0;
}
#endif /* CONFIG_X86_32 */

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}
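
/*
 * The frame ABI for a handler is carried in its sa_flags (SA_IA32_ABI,
 * SA_X32_ABI), so the frame layout is chosen per registered handler below.
 */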
static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
		ksig->ka.sa.sa_flags & SA_IA32_ABI;
}

static inline int is_ia32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
}

static inline int is_x32_frame(struct ksignal *ksig)
{
	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
		ksig->ka.sa.sa_flags & SA_X32_ABI;
}
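
/*
 * Pick the frame builder matching the handler's ABI and let it lay out the
 * frame on the user stack.
 */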
static int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	int usig = ksig->sig;
	sigset_t *set = sigmask_to_save();
	compat_sigset_t *cset = (compat_sigset_t *) set;

	/*
	 * Increment event counter and perform fixup for the pre-signal
	 * frame.
	 */
	rseq_signal_deliver(ksig, regs);

	/* Set up the stack frame */
	if (is_ia32_frame(ksig)) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			return ia32_setup_rt_frame(usig, ksig, cset, regs);
		else
			return ia32_setup_frame(usig, ksig, cset, regs);
	} else if (is_x32_frame(ksig)) {
		return x32_setup_rt_frame(ksig, cset, regs);
	} else {
		return __setup_rt_frame(ksig->sig, ksig, set, regs);
	}
}
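
/*
 * Deliver one signal: adjust the interrupted system call for restart
 * semantics, pause single-stepping while the frame is built, and switch the
 * registers over to the handler.
 */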
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	bool stepping, failed;
	struct fpu *fpu = &current->thread.fpu;

	if (v8086_mode(regs))
		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) >= 0) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
	 * so that register information in the sigcontext is correct and
	 * then notify the tracer before entering the signal handler.
	 */
	stepping = test_thread_flag(TIF_SINGLESTEP);
	if (stepping)
		user_disable_single_step(current);

	failed = (setup_rt_frame(ksig, regs) < 0);
	if (!failed) {
		/*
		 * Clear the direction flag as per the ABI for function entry.
		 *
		 * Clear RF when entering the signal handler, because
		 * it might disable possible debug exception from the
		 * signal handler.
		 *
		 * Clear TF for the case when it wasn't set by debugger to
		 * avoid the recursive send_sigtrap() in SIGTRAP handler.
		 */
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		/*
		 * Ensure the signal handler starts with the new fpu state.
		 */
		if (fpu->initialized)
			fpu__clear(fpu);
	}
	signal_setup_done(failed, ksig, stepping);
}

static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
	/*
	 * This function is fundamentally broken as currently
	 * implemented.
	 *
	 * The idea is that we want to trigger a call to the
	 * restart_block() syscall and that we want in_ia32_syscall(),
	 * in_x32_syscall(), etc. to match whatever they were in the
	 * syscall being restarted.  We assume that the syscall
	 * instruction at (regs->ip - 2) matches whatever syscall
	 * instruction we used to enter in the first place.
	 *
	 * The problem is that we can get here when ptrace pokes
	 * syscall-like values into regs even if we're not in a syscall
	 * at all.
	 *
	 * For now, we maintain historical behavior and guess based on
	 * stored state.  We could do better by saving the actual
	 * syscall arch in restart_block or (with caveats on x32) by
	 * checking if regs->ip points to 'int $0x80'.  The current
	 * behavior is incorrect if a tracer has a different bitness
	 * than the tracee.
	 */
#ifdef CONFIG_IA32_EMULATION
	if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
		return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
#else
	return __NR_restart_syscall;
#endif
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee! Actually deliver the signal.  */
		handle_signal(&ksig, regs);
		return;
	}

	/* Did we come from a system call? */
	if (syscall_get_nr(current, regs) >= 0) {
		/* Restart the system call - no handlers present */
		switch (syscall_get_error(current, regs)) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->ax = get_nr_restart_syscall(regs);
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}
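
/*
 * Report a corrupted signal frame (rate limited) and kill the offending
 * task with SIGSEGV.
 */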
void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
{
	struct task_struct *me = current;

	if (show_unhandled_signals && printk_ratelimit()) {
		printk("%s"
		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
		       me->comm, me->pid, where, frame,
		       regs->ip, regs->sp, regs->orig_ax);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig(SIGSEGV, me);
}

#ifdef CONFIG_X86_X32_ABI
asmlinkage long sys32_x32_rt_sigreturn(void)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe_x32 __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "x32 rt_sigreturn");
	return 0;
}
#endif