
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>

#include "signal-common.h"

static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);

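/*
 * Layout of the frames pushed onto the user stack for classic and RT
 * signal delivery. The leading words preserve the o32 argument save
 * area; the slots that once held the signal return trampoline are
 * kept only as padding, since the return path now goes through the
 * VDSO trampolines referenced further down in this file.
 */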
struct sigframe {
        u32 sf_ass[4];          /* argument save space for o32 */
        u32 sf_pad[2];          /* Was: signal trampoline */
        struct sigcontext sf_sc;
        sigset_t sf_mask;
};

struct rt_sigframe {
        u32 rs_ass[4];          /* argument save space for o32 */
        u32 rs_pad[2];          /* Was: signal trampoline */
        struct siginfo rs_info;
        struct ucontext rs_uc;
};

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
        int i;
        int err = 0;
        int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

        for (i = 0; i < NUM_FPU_REGS; i += inc) {
                err |=
                        __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
                                   &fpregs[i]);
        }
        err |= __put_user(current->thread.fpu.fcr31, csr);

        return err;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
        int i;
        int err = 0;
        int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
        u64 fpr_val;

        for (i = 0; i < NUM_FPU_REGS; i += inc) {
                err |= __get_user(fpr_val, &fpregs[i]);
                set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
        }
        err |= __get_user(current->thread.fpu.fcr31, csr);

        return err;
}

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fp_context(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

        return _save_fp_context(fpregs, csr);
}

static int restore_hw_fp_context(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

        return _restore_fp_context(fpregs, csr);
}

/*
 * Helper routines
 */

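/*
 * Save the current task's FPU state into the sigcontext. If the task
 * still owns the hardware FPU the context is written out directly;
 * otherwise the thread's saved copy is used. A fault while writing
 * touches the sigcontext words to fault the page in and retries.
 */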
int protected_save_fp_context(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
        uint32_t __user *used_math = sc + abi->off_sc_used_math;
        unsigned int used;
        int err;

        used = !!used_math();
        err = __put_user(used, used_math);
        if (err || !used)
                return err;

        /*
         * EVA does not have userland equivalents of ldc1 or sdc1, so
         * save to the kernel FP context & copy that to userland below.
         */
        if (config_enabled(CONFIG_EVA))
                lose_fpu(1);

        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = save_fp_context(sc);
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_to_sigcontext(sc);
                }
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
                err = __put_user(0, &fpregs[0]) |
                        __put_user(0, &fpregs[31]) |
                        __put_user(0, csr);
                if (err)
                        break;  /* really bad sigcontext */
        }

        return err;
}

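/*
 * Counterpart of protected_save_fp_context(): reload the FPU state
 * from the sigcontext, either straight into the hardware registers or
 * into the thread's saved copy, faulting the sigcontext pages in and
 * retrying on failure just like the save path.
 */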
int protected_restore_fp_context(void __user *sc)
{
        struct mips_abi *abi = current->thread.abi;
        uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
        uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
        uint32_t __user *used_math = sc + abi->off_sc_used_math;
        unsigned int used;
        int err, sig, tmp __maybe_unused;

        err = __get_user(used, used_math);
        conditional_used_math(used);

        /*
         * The signal handler may have used FPU; give it up if the program
         * doesn't want it following sigreturn.
         */
        if (err || !used) {
                lose_fpu(0);
                return err;
        }

        err = sig = fpcsr_pending(csr);
        if (err < 0)
                return err;

        /*
         * EVA does not have userland equivalents of ldc1 or sdc1, so we
         * disable the FPU here such that the code below simply copies to
         * the kernel FP context.
         */
        if (config_enabled(CONFIG_EVA))
                lose_fpu(0);

        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = restore_fp_context(sc);
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_from_sigcontext(sc);
                }
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
                err = __get_user(tmp, &fpregs[0]) |
                        __get_user(tmp, &fpregs[31]) |
                        __get_user(tmp, csr);
                if (err)
                        break;  /* really bad sigcontext */
        }

        return err ?: sig;
}

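/*
 * Write the user-visible register state (GPRs, hi/lo, the DSP
 * accumulators where present, and the FPU context) into a sigcontext
 * on the user stack.
 */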
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
        int err = 0;
        int i;

        err |= __put_user(regs->cp0_epc, &sc->sc_pc);

        err |= __put_user(0, &sc->sc_regs[0]);
        for (i = 1; i < 32; i++)
                err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        err |= __put_user(regs->acx, &sc->sc_acx);
#endif
        err |= __put_user(regs->hi, &sc->sc_mdhi);
        err |= __put_user(regs->lo, &sc->sc_mdlo);
        if (cpu_has_dsp) {
                err |= __put_user(mfhi1(), &sc->sc_hi1);
                err |= __put_user(mflo1(), &sc->sc_lo1);
                err |= __put_user(mfhi2(), &sc->sc_hi2);
                err |= __put_user(mflo2(), &sc->sc_lo2);
                err |= __put_user(mfhi3(), &sc->sc_hi3);
                err |= __put_user(mflo3(), &sc->sc_lo3);
                err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
        }

        /*
         * Save FPU state to signal context. Signal handler
         * will "inherit" current FPU state.
         */
        err |= protected_save_fp_context(sc);

        return err;
}

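/*
 * Check the sigcontext FCSR for exception cause bits whose
 * corresponding enable bits are set (the unimplemented-operation
 * cause is always treated as enabled). If any are found they are
 * cleared in the sigcontext and SIGFPE is returned; otherwise 0, or
 * a negative error if the sigcontext could not be accessed.
 */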
int fpcsr_pending(unsigned int __user *fpcsr)
{
        int err, sig = 0;
        unsigned int csr, enabled;

        err = __get_user(csr, fpcsr);
        enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
        /*
         * If the signal handler set some FPU exceptions, clear them and
         * send SIGFPE.
         */
        if (csr & enabled) {
                csr &= ~enabled;
                err |= __put_user(csr, fpcsr);
                sig = SIGFPE;
        }
        return err ?: sig;
}

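/*
 * Inverse of setup_sigcontext(): rebuild the register state from a
 * sigcontext on sigreturn. Returns 0, a signal number to force
 * (SIGFPE from fpcsr_pending()), or a negative error.
 */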
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
        unsigned long treg;
        int err = 0;
        int i;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        err |= __get_user(regs->acx, &sc->sc_acx);
#endif
        err |= __get_user(regs->hi, &sc->sc_mdhi);
        err |= __get_user(regs->lo, &sc->sc_mdlo);
        if (cpu_has_dsp) {
                err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
                err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
                err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
                err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
                err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
                err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
                err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
        }

        for (i = 1; i < 32; i++)
                err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

        return err ?: protected_restore_fp_context(sc);
}

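/*
 * Pick the location on the user stack at which the signal frame will
 * be built, honouring SA_ONSTACK via sigsp() and keeping the result
 * suitably aligned.
 */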
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
                          size_t frame_size)
{
        unsigned long sp;

        /* Default to using normal stack */
        sp = regs->regs[29];

        /*
         * The FPU emulator may have its own trampoline active just
         * above the user stack, 16 bytes before the next lowest
         * 16-byte boundary. Try to avoid trashing it.
         */
        sp -= 32;

        sp = sigsp(sp, ksig);

        return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
        return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
        struct sigaction __user *, oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;
        int err = 0;

        if (act) {
                old_sigset_t mask;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)))
                        return -EFAULT;
                err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
                err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                err |= __get_user(mask, &act->sa_mask.sig[0]);
                if (err)
                        return -EFAULT;

                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
                        return -EFAULT;
                err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
                err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
                err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
                err |= __put_user(0, &oact->sa_mask.sig[1]);
                err |= __put_user(0, &oact->sa_mask.sig[2]);
                err |= __put_user(0, &oact->sa_mask.sig[3]);
                if (err)
                        return -EFAULT;
        }

        return ret;
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
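/*
 * Return from a classic (non-RT) signal handler: restore the blocked
 * signal mask and the register state from the sigframe left on the
 * user stack, then rejoin the syscall exit path in assembly.
 */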
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
        struct sigframe __user *frame;
        sigset_t blocked;
        int sig;

        frame = (struct sigframe __user *) regs.regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
                goto badframe;

        set_current_blocked(&blocked);

        sig = restore_sigcontext(&regs, &frame->sf_sc);
        if (sig < 0)
                goto badframe;
        else if (sig)
                force_sig(sig, current);

        /*
         * Don't let your children do this ...
         */
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
                : /* no outputs */
                : "r" (&regs));
        /* Unreached */

badframe:
        force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

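/*
 * RT variant of sigreturn: additionally restores the alternate signal
 * stack settings from the saved ucontext.
 */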
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
        struct rt_sigframe __user *frame;
        sigset_t set;
        int sig;

        frame = (struct rt_sigframe __user *) regs.regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
                goto badframe;

        set_current_blocked(&set);

        sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
        if (sig < 0)
                goto badframe;
        else if (sig)
                force_sig(sig, current);

        if (restore_altstack(&frame->rs_uc.uc_stack))
                goto badframe;

        /*
         * Don't let your children do this ...
         */
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
                : /* no outputs */
                : "r" (&regs));
        /* Unreached */

badframe:
        force_sig(SIGSEGV, current);
}

#ifdef CONFIG_TRAD_SIGNALS
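/*
 * Build a classic signal frame on the user stack and redirect
 * execution to the handler; sig_return points at the sigreturn
 * trampoline in the VDSO.
 */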
static int setup_frame(void *sig_return, struct ksignal *ksig,
                       struct pt_regs *regs, sigset_t *set)
{
        struct sigframe __user *frame;
        int err = 0;

        frame = get_sigframe(ksig, regs, sizeof(*frame));
        if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
                return -EFAULT;

        err |= setup_sigcontext(regs, &frame->sf_sc);
        err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
        if (err)
                return -EFAULT;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = 0 (should be cause)
         *   a2 = pointer to struct sigcontext
         *
         * $25 and c0_epc point to the signal handler, $29 points to the
         * struct sigframe.
         */
        regs->regs[ 4] = ksig->sig;
        regs->regs[ 5] = 0;
        regs->regs[ 6] = (unsigned long) &frame->sf_sc;
        regs->regs[29] = (unsigned long) frame;
        regs->regs[31] = (unsigned long) sig_return;
        regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

        DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
               current->comm, current->pid,
               frame, regs->cp0_epc, regs->regs[31]);

        return 0;
}
#endif

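/*
 * Like setup_frame(), but also fills in siginfo and a full ucontext
 * (including the altstack description) for SA_SIGINFO handlers.
 */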
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
                          struct pt_regs *regs, sigset_t *set)
{
        struct rt_sigframe __user *frame;
        int err = 0;

        frame = get_sigframe(ksig, regs, sizeof(*frame));
        if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
                return -EFAULT;

        /* Create siginfo.  */
        err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

        /* Create the ucontext.  */
        err |= __put_user(0, &frame->rs_uc.uc_flags);
        err |= __put_user(NULL, &frame->rs_uc.uc_link);
        err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
        err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
        err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

        if (err)
                return -EFAULT;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = pointer to siginfo
         *   a2 = pointer to ucontext
         *
         * $25 and c0_epc point to the signal handler, $29 points to
         * the struct rt_sigframe.
         */
        regs->regs[ 4] = ksig->sig;
        regs->regs[ 5] = (unsigned long) &frame->rs_info;
        regs->regs[ 6] = (unsigned long) &frame->rs_uc;
        regs->regs[29] = (unsigned long) frame;
        regs->regs[31] = (unsigned long) sig_return;
        regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

        DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
               current->comm, current->pid,
               frame, regs->cp0_epc, regs->regs[31]);

        return 0;
}

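/*
 * Native ABI descriptor: frame setup hooks, VDSO trampoline offsets,
 * the restart syscall number and the sigcontext field offsets used by
 * the FP save/restore helpers above.
 */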
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
        .setup_frame = setup_frame,
        .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
        .setup_rt_frame = setup_rt_frame,
        .rt_signal_return_offset =
                offsetof(struct mips_vdso, rt_signal_trampoline),
        .restart = __NR_restart_syscall,

        .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
        .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
        .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
};

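/*
 * Deliver one signal: fix up syscall restarting first, then let the
 * ABI build either a classic or an RT frame depending on whether the
 * handler wants siginfo.
 */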
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int ret;
        struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
        void *vdso;
        unsigned long tmp = (unsigned long)current->mm->context.vdso;

        set_isa16_mode(tmp);
        vdso = (void *)tmp;
#else
        void *vdso = current->mm->context.vdso;
#endif

        if (regs->regs[0]) {
                switch (regs->regs[2]) {
                case ERESTART_RESTARTBLOCK:
                case ERESTARTNOHAND:
                        regs->regs[2] = EINTR;
                        break;
                case ERESTARTSYS:
                        if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
                                regs->regs[2] = EINTR;
                                break;
                        }
                        /* fallthrough */
                case ERESTARTNOINTR:
                        regs->regs[7] = regs->regs[26];
                        regs->regs[2] = regs->regs[0];
                        regs->cp0_epc -= 4;
                }

                regs->regs[0] = 0;      /* Don't deal with this again.  */
        }

        if (sig_uses_siginfo(&ksig->ka))
                ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
                                          ksig, regs, oldset);
        else
                ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
                                       regs, oldset);

        signal_setup_done(ret, ksig, 0);
}

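/*
 * Main signal dispatch, called on the way back to user mode: deliver
 * a pending signal if there is one, otherwise handle syscall restart
 * and put the saved sigmask back.
 */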
static void do_signal(struct pt_regs *regs)
{
        struct ksignal ksig;

        if (get_signal(&ksig)) {
                /* Whee! Actually deliver the signal. */
                handle_signal(&ksig, regs);
                return;
        }

        if (regs->regs[0]) {
                switch (regs->regs[2]) {
                case ERESTARTNOHAND:
                case ERESTARTSYS:
                case ERESTARTNOINTR:
                        regs->regs[2] = regs->regs[0];
                        regs->regs[7] = regs->regs[26];
                        regs->cp0_epc -= 4;
                        break;

                case ERESTART_RESTARTBLOCK:
                        regs->regs[2] = current->thread.abi->restart;
                        regs->regs[7] = regs->regs[26];
                        regs->cp0_epc -= 4;
                        break;
                }
                regs->regs[0] = 0;      /* Don't deal with this again.  */
        }

        /*
         * If there's no signal to deliver, we just put the saved sigmask
         * back
         */
        restore_saved_sigmask();
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        __u32 thread_info_flags)
{
        local_irq_enable();

        user_exit();

        /* deal with pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs);

        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
        }

        user_enter();
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(void __user *sc)
{
        return raw_cpu_has_fpu
               ? save_hw_fp_context(sc)
               : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
        return raw_cpu_has_fpu
               ? restore_hw_fp_context(sc)
               : copy_fp_from_sigcontext(sc);
}
#endif

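/*
 * Choose the FP save/restore implementations at boot. On SMP the
 * per-CPU FPU check is deferred to the call site; on UP it can be
 * resolved once here.
 */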
static int signal_setup(void)
{
#ifdef CONFIG_SMP
        /* For now just do the cpu_has_fpu check when the functions are invoked */
        save_fp_context = smp_save_fp_context;
        restore_fp_context = smp_restore_fp_context;
#else
        if (cpu_has_fpu) {
                save_fp_context = save_hw_fp_context;
                restore_fp_context = restore_hw_fp_context;
        } else {
                save_fp_context = copy_fp_to_sigcontext;
                restore_fp_context = copy_fp_from_sigcontext;
        }
#endif /* CONFIG_SMP */

        return 0;
}

arch_initcall(signal_setup);