signal.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1991, 1992 Linus Torvalds
  7. * Copyright (C) 1994 - 2000 Ralf Baechle
  8. * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  9. * Copyright (C) 2014, Imagination Technologies Ltd.
  10. */
  11. #include <linux/cache.h>
  12. #include <linux/context_tracking.h>
  13. #include <linux/irqflags.h>
  14. #include <linux/sched.h>
  15. #include <linux/mm.h>
  16. #include <linux/personality.h>
  17. #include <linux/smp.h>
  18. #include <linux/kernel.h>
  19. #include <linux/signal.h>
  20. #include <linux/errno.h>
  21. #include <linux/wait.h>
  22. #include <linux/ptrace.h>
  23. #include <linux/unistd.h>
  24. #include <linux/compiler.h>
  25. #include <linux/syscalls.h>
  26. #include <linux/uaccess.h>
  27. #include <linux/tracehook.h>
  28. #include <asm/abi.h>
  29. #include <asm/asm.h>
  30. #include <linux/bitops.h>
  31. #include <asm/cacheflush.h>
  32. #include <asm/fpu.h>
  33. #include <asm/sim.h>
  34. #include <asm/ucontext.h>
  35. #include <asm/cpu-features.h>
  36. #include <asm/war.h>
  37. #include <asm/vdso.h>
  38. #include <asm/dsp.h>
  39. #include <asm/inst.h>
  40. #include "signal-common.h"
/*
 * Pointers to the FP sigcontext save/restore implementations, selected
 * once at boot by signal_setup() below (hardware FPU vs. memcpy-style
 * software copy).
 */
static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);
/* Classic (non-RT) signal frame laid out on the user stack. */
struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */
	struct sigcontext sf_sc;	/* saved register state */
	sigset_t sf_mask;	/* signal mask to restore on sigreturn */
};
/* RT signal frame laid out on the user stack (siginfo + ucontext). */
struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;	/* siginfo passed to the handler */
	struct ucontext rs_uc;	/* ucontext incl. mcontext and sigmask */
};
  55. /*
  56. * Thread saved context copy to/from a signal context presumed to be on the
  57. * user stack, and therefore accessed with appropriate macros from uaccess.h.
  58. */
  59. static int copy_fp_to_sigcontext(void __user *sc)
  60. {
  61. struct mips_abi *abi = current->thread.abi;
  62. uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
  63. uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
  64. int i;
  65. int err = 0;
  66. for (i = 0; i < NUM_FPU_REGS; i++) {
  67. err |=
  68. __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
  69. &fpregs[i]);
  70. }
  71. err |= __put_user(current->thread.fpu.fcr31, csr);
  72. return err;
  73. }
  74. static int copy_fp_from_sigcontext(void __user *sc)
  75. {
  76. struct mips_abi *abi = current->thread.abi;
  77. uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
  78. uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
  79. int i;
  80. int err = 0;
  81. u64 fpr_val;
  82. for (i = 0; i < NUM_FPU_REGS; i++) {
  83. err |= __get_user(fpr_val, &fpregs[i]);
  84. set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
  85. }
  86. err |= __get_user(current->thread.fpu.fcr31, csr);
  87. return err;
  88. }
  89. /*
  90. * Wrappers for the assembly _{save,restore}_fp_context functions.
  91. */
  92. static int save_hw_fp_context(void __user *sc)
  93. {
  94. struct mips_abi *abi = current->thread.abi;
  95. uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
  96. uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
  97. return _save_fp_context(fpregs, csr);
  98. }
  99. static int restore_hw_fp_context(void __user *sc)
  100. {
  101. struct mips_abi *abi = current->thread.abi;
  102. uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
  103. uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
  104. return _restore_fp_context(fpregs, csr);
  105. }
/*
 * Helper routines
 */

/*
 * Save FP state to a user sigcontext, either via the FPU save routine
 * (when this task owns the FPU) or by copying the saved thread context.
 * On a uaccess fault the sigcontext is touched (presumably to fault the
 * page in — see the retry comment below) and the save is retried.
 * Returns 0 on success, non-zero on an unrecoverable fault.
 */
static int protected_save_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so
	 * save to the kernel FP context & copy that to userland below.
	 */
	if (config_enabled(CONFIG_EVA))
		lose_fpu(1);

	while (1) {
		/* FPU ownership must not change while we pick a path. */
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &fpregs[0]) |
			__put_user(0, &fpregs[31]) |
			__put_user(0, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err;
}
/*
 * Counterpart of protected_save_fp_context(): load FP state from the
 * user sigcontext either straight into the FPU (when owned) or into the
 * saved thread context, with the same touch-and-retry handling of
 * uaccess faults.  Returns 0 on success, non-zero on a bad sigcontext.
 */
static int protected_restore_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int err, tmp __maybe_unused;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
	 * disable the FPU here such that the code below simply copies to
	 * the kernel FP context.
	 */
	if (config_enabled(CONFIG_EVA))
		lose_fpu(0);

	while (1) {
		/* FPU ownership must not change while we pick a path. */
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &fpregs[0]) |
			__get_user(tmp, &fpregs[31]) |
			__get_user(tmp, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err;
}
/*
 * Write the current CPU state (and, when used, FPU/DSP state) into a
 * user-space sigcontext.  Returns 0 on success or a non-zero
 * accumulation of __put_user() failures.
 */
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;
	unsigned int used_math;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	/* $0 is hardwired to zero; store 0 for it and copy GPRs 1..31. */
	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		/* Extra accumulator pairs and control mask of the DSP ASE. */
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	/* Normalize used_math() to 0/1 before storing it. */
	used_math = !!used_math();
	err |= __put_user(used_math, &sc->sc_used_math);

	if (used_math) {
		/*
		 * Save FPU state to signal context. Signal handler
		 * will "inherit" current FPU state.
		 */
		err |= protected_save_fp_context(sc);
	}
	return err;
}
  208. int fpcsr_pending(unsigned int __user *fpcsr)
  209. {
  210. int err, sig = 0;
  211. unsigned int csr, enabled;
  212. err = __get_user(csr, fpcsr);
  213. enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
  214. /*
  215. * If the signal handler set some FPU exceptions, clear it and
  216. * send SIGFPE.
  217. */
  218. if (csr & enabled) {
  219. csr &= ~enabled;
  220. err |= __put_user(csr, fpcsr);
  221. sig = SIGFPE;
  222. }
  223. return err ?: sig;
  224. }
  225. static int
  226. check_and_restore_fp_context(void __user *sc)
  227. {
  228. struct mips_abi *abi = current->thread.abi;
  229. int err, sig;
  230. err = sig = fpcsr_pending(sc + abi->off_sc_fpc_csr);
  231. if (err > 0)
  232. err = 0;
  233. err |= protected_restore_fp_context(sc);
  234. return err ?: sig;
  235. }
/*
 * Reload CPU state (and, when in use, FPU/DSP state) from a user
 * sigcontext.  Returns 0 on success, a negative error for a bad
 * sigcontext, or a positive signal number the caller must deliver.
 */
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned int used_math;
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		/* Reload the DSP ASE accumulators and control mask. */
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	/* $0 stays hardwired to zero; restore GPRs 1..31 only. */
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	err |= __get_user(used_math, &sc->sc_used_math);
	conditional_used_math(used_math);

	if (used_math) {
		/* restore fpu context if we have used it before */
		if (!err)
			err = check_and_restore_fp_context(sc);
	} else {
		/* signal handler may have used FPU. Give it up. */
		lose_fpu(0);
	}

	return err;
}
  273. void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
  274. size_t frame_size)
  275. {
  276. unsigned long sp;
  277. /* Default to using normal stack */
  278. sp = regs->regs[29];
  279. /*
  280. * FPU emulator may have it's own trampoline active just
  281. * above the user stack, 16-bytes before the next lowest
  282. * 16 byte boundary. Try to avoid trashing it.
  283. */
  284. sp -= 32;
  285. sp = sigsp(sp, ksig);
  286. return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
  287. }
  288. /*
  289. * Atomically swap in the new signal mask, and wait for a signal.
  290. */
  291. #ifdef CONFIG_TRAD_SIGNALS
/* Legacy sigsuspend(2): forwarded to the rt_sigsuspend implementation. */
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
  296. #endif
  297. #ifdef CONFIG_TRAD_SIGNALS
/*
 * Legacy sigaction(2) taking an old_sigset_t mask: copy the new action
 * in, call do_sigaction(), and copy the old action back out.
 */
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		/* Only the first word of the old mask fits; zero the rest. */
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
  330. #endif
  331. #ifdef CONFIG_TRAD_SIGNALS
/*
 * sigreturn(2) for classic frames: restore the blocked mask and the
 * register state saved by setup_frame(), then jump back out through
 * syscall_exit.  A bad frame kills the task with SIGSEGV.
 */
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	/* The sigframe sits at the user stack pointer ($29). */
	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	/* Negative: bad frame; positive: a signal (SIGFPE) to deliver. */
	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
  360. #endif /* CONFIG_TRAD_SIGNALS */
/*
 * rt_sigreturn(2): restore the sigmask, register state and altstack
 * saved by setup_rt_frame(), then jump back out through syscall_exit.
 * A bad frame kills the task with SIGSEGV.
 */
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	/* The rt_sigframe sits at the user stack pointer ($29). */
	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	/* Negative: bad frame; positive: a signal (SIGFPE) to deliver. */
	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
  391. #ifdef CONFIG_TRAD_SIGNALS
/*
 * Build a classic sigframe on the user stack and redirect the register
 * state at the handler.  sig_return is the vDSO trampoline address the
 * handler returns to.  Returns 0 on success or -EFAULT.
 */
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}
  425. #endif
/*
 * Build an RT sigframe (siginfo + ucontext) on the user stack and
 * redirect the register state at the handler.  sig_return is the vDSO
 * rt trampoline address.  Returns 0 on success or -EFAULT.
 */
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}
/*
 * Native ABI descriptor: frame-setup hooks, vDSO trampoline offsets,
 * the restart syscall number, and the sigcontext field offsets used by
 * the FP save/restore helpers above.
 */
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame = setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame = setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart = __NR_restart_syscall,

	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
};
/*
 * Deliver one signal: fix up the restart state of an interrupted
 * syscall, then build the (rt_)sigframe via the current ABI's setup
 * routine and report the result through signal_setup_done().
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
	void *vdso;
	unsigned long tmp = (unsigned long)current->mm->context.vdso;

	/* The vDSO trampolines are microMIPS code: set the ISA bit. */
	set_isa16_mode(tmp);
	vdso = (void *)tmp;
#else
	void *vdso = current->mm->context.vdso;
#endif

	/* Non-zero regs[0] flags an interrupted syscall (see do_signal). */
	if (regs->regs[0]) {
		switch(regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
		/* fallthrough */
		case ERESTARTNOINTR:
			/* Rewind EPC over the syscall so it is re-issued. */
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	if (sig_uses_siginfo(&ksig->ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
				       regs, oldset);

	signal_setup_done(ret, ksig, 0);
}
/*
 * Handle a pending signal if there is one; otherwise arrange for any
 * interrupted syscall to restart and put the saved sigmask back.
 */
static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	/* Non-zero regs[0] flags an interrupted syscall. */
	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			/* Re-issue the original syscall. */
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			/* Restart via the ABI's restart_syscall number. */
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}
/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	/* Context tracking: mark the transition out of user mode. */
	user_exit();

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	/* Context tracking: we are about to return to user mode. */
	user_enter();
}
  567. #ifdef CONFIG_SMP
  568. static int smp_save_fp_context(void __user *sc)
  569. {
  570. return raw_cpu_has_fpu
  571. ? save_hw_fp_context(sc)
  572. : copy_fp_to_sigcontext(sc);
  573. }
  574. static int smp_restore_fp_context(void __user *sc)
  575. {
  576. return raw_cpu_has_fpu
  577. ? restore_hw_fp_context(sc)
  578. : copy_fp_from_sigcontext(sc);
  579. }
  580. #endif
/*
 * Boot-time selection of the FP context save/restore implementations
 * stored in the save_fp_context/restore_fp_context pointers.
 */
static int signal_setup(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = save_hw_fp_context;
		restore_fp_context = restore_hw_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP */

	return 0;
}

arch_initcall(signal_setup);