// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m32r/kernel/traps.c
 *
 * Copyright (C) 2001, 2002 Hirokazu Takata, Hiroyuki Kondo,
 *                          Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <linux/module.h>

asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * for Boot AP function
 */
asm (
        "       .section .eit_vector4,\"ax\"    \n"
        "       .global _AP_RE                  \n"
        "       .global startup_AP              \n"
        "_AP_RE:                                \n"
        "       .fill 32, 4, 0                  \n"
        "_AP_EI: bra startup_AP                 \n"
        "       .previous                       \n"
);
#endif  /* CONFIG_SMP */

extern unsigned long eit_vector[];
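
/*
 * BRA_INSN() builds an M32R "bra" (branch) instruction in place: the top
 * byte is the bra opcode (0xff) and the low 24 bits are the pc-relative
 * displacement, counted in 4-byte words, from the given vector entry to
 * the handler.  E.g. BRA_INSN(rie_handler, 8) evaluates to
 * 0xff000000 + ((unsigned long)rie_handler - (unsigned long)eit_vector - 8*4)/4.
 */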
#define BRA_INSN(func, entry)   \
        ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
        + 0xff000000UL
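
/*
 * Populate the EIT (exception, interrupt and trap) vector table.  Most
 * entries get a branch instruction to the corresponding handler; unused
 * trap vectors are pointed at ill_trap.
 */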
static void set_eit_vector_entries(void)
{
        extern void default_eit_handler(void);
        extern void system_call(void);
        extern void pie_handler(void);
        extern void ace_handler(void);
        extern void tme_handler(void);
        extern void _flush_cache_copyback_all(void);

        eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
        eit_vector[1] = BRA_INSN(default_eit_handler, 1);
        eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
        eit_vector[5] = BRA_INSN(default_eit_handler, 5);
        eit_vector[8] = BRA_INSN(rie_handler, 8);
        eit_vector[12] = BRA_INSN(alignment_check, 12);
        eit_vector[16] = BRA_INSN(ill_trap, 16);
        eit_vector[17] = BRA_INSN(debug_trap, 17);
        eit_vector[18] = BRA_INSN(system_call, 18);
        eit_vector[19] = BRA_INSN(ill_trap, 19);
        eit_vector[20] = BRA_INSN(ill_trap, 20);
        eit_vector[21] = BRA_INSN(ill_trap, 21);
        eit_vector[22] = BRA_INSN(ill_trap, 22);
        eit_vector[23] = BRA_INSN(ill_trap, 23);
        eit_vector[24] = BRA_INSN(ill_trap, 24);
        eit_vector[25] = BRA_INSN(ill_trap, 25);
        eit_vector[26] = BRA_INSN(ill_trap, 26);
        eit_vector[27] = BRA_INSN(ill_trap, 27);
        eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
        eit_vector[29] = BRA_INSN(ill_trap, 29);
        eit_vector[30] = BRA_INSN(ill_trap, 30);
        eit_vector[31] = BRA_INSN(ill_trap, 31);
        eit_vector[32] = BRA_INSN(ei_handler, 32);
        eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
        eit_vector[68] = BRA_INSN(ace_handler, 68);
        eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
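        /*
         * These IPI slots hold the handler addresses themselves rather
         * than encoded branch instructions, so they are presumably
         * dispatched indirectly by the external-interrupt path in entry.S.
         */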
        eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
        eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
        eit_vector[186] = (unsigned long)smp_call_function_interrupt;
        eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
        eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
        eit_vector[189] = 0;    /* CPU_BOOT_IPI */
        eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
        eit_vector[191] = 0;
#endif
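
        /*
         * The vector table was just rewritten as instructions, so flush
         * the copyback data cache before anything can fetch from it.
         */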
        _flush_cache_copyback_all();
}

void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}

void __init trap_init(void)
{
        set_eit_vector_entries();

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

static int kstack_depth_to_print = 24;
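
/*
 * Best-effort backtrace: scan the raw stack words and print every value
 * that points into kernel text.  This can also report stale return
 * addresses still sitting on the stack, not only live frames.
 */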
static void show_trace(struct task_struct *task, unsigned long *stack)
{
        unsigned long addr;

        if (!stack)
                stack = (unsigned long *)&stack;

        printk("Call Trace: ");
        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr))
                        printk("[<%08lx>] %pSR\n", addr, (void *)addr);
        }
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        /*
         * debugging aid: "show_stack(NULL);" prints the
         * back trace for this cpu.
         */
        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 4) == 0))
                        printk("\n ");
                printk("%08lx ", *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

static void show_registers(struct pt_regs *regs)
{
        int i = 0;
        int in_kernel = 1;
        unsigned long sp;

        printk("CPU: %d\n", smp_processor_id());
        show_regs(regs);
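
        /*
         * For a kernel-mode fault the stack pointer at the time of the
         * exception is the word just above the saved pt_regs frame; for a
         * user-mode fault we report the saved user stack pointer instead.
         */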
        sp = (unsigned long)(1 + regs);
        if (user_mode(regs)) {
                in_kernel = 0;
                sp = regs->spu;
                printk("SPU: %08lx\n", sp);
        } else {
                printk("SPI: %08lx\n", sp);
        }

        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
                current->comm, task_pid_nr(current), 0xffff & i,
                4096 + (unsigned long)current);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("\nStack: ");
                show_stack(current, (unsigned long *)sp);

                printk("\nCode: ");
                if (regs->bpc < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;

                        if (__get_user(c, &((unsigned char *)regs->bpc)[i])) {
bad:
                                printk(" Bad PC value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

static DEFINE_SPINLOCK(die_lock);
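
/*
 * Report a fatal kernel fault: take die_lock so concurrent oopses do not
 * interleave, dump the registers and stack, then terminate the current
 * task with SIGSEGV.
 */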
void die(const char *str, struct pt_regs *regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_registers(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char *str,
        struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}
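
/*
 * Common trap handling: a fault taken in user mode is reported to the
 * task as a signal (with siginfo when the caller supplies it); a fault
 * taken in kernel mode is first offered to the exception-fixup table and
 * only die()s if no fixup exists.
 */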
static __inline__ void do_trap(int trapnr, int signr, const char *str,
        struct pt_regs *regs, long error_code, siginfo_t *info)
{
        if (user_mode(regs)) {
                /* trap_signal */
                struct task_struct *tsk = current;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }
}
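
/*
 * DO_ERROR() and DO_ERROR_INFO() generate the trivial trap handlers: each
 * expands to a do_<name>() entry point that forwards to do_trap(), the
 * _INFO variant additionally filling in a siginfo with the fault address.
 */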
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR(1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
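/*
 * Alignment fault handler: fetch the faulting instruction from regs->bpc
 * (switching the address limit so the fetch is allowed from either user
 * or kernel space) and hand it to handle_unaligned_access() for
 * emulation.  A user fault that cannot be read or emulated kills the
 * process with SIGSEGV; a kernel fetch failure oopses.
 */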
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
        mm_segment_t oldfs;
        unsigned long insn;
        int tmp;

        oldfs = get_fs();

        if (user_mode(regs)) {
                local_irq_enable();
                current->thread.error_code = error_code;
                current->thread.trap_no = 0x17;

                set_fs(USER_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                tmp = handle_unaligned_access(insn, regs);
                set_fs(oldfs);

                if (!tmp)
                        return;

        uspace_segv:
                printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
                        "access\n", current->comm);
                force_sig(SIGSEGV, current);
        } else {
                set_fs(KERNEL_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }
                handle_unaligned_access(insn, regs);
                set_fs(oldfs);
        }
}