// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m32r/kernel/traps.c
 *
 * Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
 *                           Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/cpu.h>

#include <asm/page.h>
#include <asm/processor.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>

#include <asm/smp.h>

#include <linux/module.h>

asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * for Boot AP function
 */
asm (
	"	.section .eit_vector4,\"ax\"	\n"
	"	.global _AP_RE			\n"
	"	.global startup_AP		\n"
	"_AP_RE:				\n"
	"	.fill 32, 4, 0			\n"
	"_AP_EI: bra	startup_AP		\n"
	"	.previous			\n"
);
#endif /* CONFIG_SMP */

extern unsigned long eit_vector[];
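
/*
 * Build the branch instruction word placed in an EIT vector slot: the
 * displacement from slot 'entry' to 'func' is expressed in 4-byte words
 * and combined with the opcode bits in 0xff000000UL.
 */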
#define BRA_INSN(func, entry)	\
	((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
	+ 0xff000000UL

static void set_eit_vector_entries(void)
{
	extern void default_eit_handler(void);
	extern void system_call(void);
	extern void pie_handler(void);
	extern void ace_handler(void);
	extern void tme_handler(void);
	extern void _flush_cache_copyback_all(void);

	eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
	eit_vector[1] = BRA_INSN(default_eit_handler, 1);
	eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
	eit_vector[5] = BRA_INSN(default_eit_handler, 5);
	eit_vector[8] = BRA_INSN(rie_handler, 8);
	eit_vector[12] = BRA_INSN(alignment_check, 12);
	eit_vector[16] = BRA_INSN(ill_trap, 16);
	eit_vector[17] = BRA_INSN(debug_trap, 17);
	eit_vector[18] = BRA_INSN(system_call, 18);
	eit_vector[19] = BRA_INSN(ill_trap, 19);
	eit_vector[20] = BRA_INSN(ill_trap, 20);
	eit_vector[21] = BRA_INSN(ill_trap, 21);
	eit_vector[22] = BRA_INSN(ill_trap, 22);
	eit_vector[23] = BRA_INSN(ill_trap, 23);
	eit_vector[24] = BRA_INSN(ill_trap, 24);
	eit_vector[25] = BRA_INSN(ill_trap, 25);
	eit_vector[26] = BRA_INSN(ill_trap, 26);
	eit_vector[27] = BRA_INSN(ill_trap, 27);
	eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
	eit_vector[29] = BRA_INSN(ill_trap, 29);
	eit_vector[30] = BRA_INSN(ill_trap, 30);
	eit_vector[31] = BRA_INSN(ill_trap, 31);
	eit_vector[32] = BRA_INSN(ei_handler, 32);
	eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
	eit_vector[68] = BRA_INSN(ace_handler, 68);
	eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
	eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
	eit_vector[189] = 0;	/* CPU_BOOT_IPI */
	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
	eit_vector[191] = 0;
#endif
	_flush_cache_copyback_all();
}

void __init trap_init(void)
{
	set_eit_vector_entries();

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}

static int kstack_depth_to_print = 24;
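
/*
 * Scan the stack for values that look like kernel text addresses and
 * print them as a (heuristic) call trace.
 */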
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	unsigned long addr;

	if (!stack)
		stack = (unsigned long*)&stack;

	printk("Call Trace: ");
	while (!kstack_end(stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr))
			printk("[<%08lx>] %pSR\n", addr, (void *)addr);
	}
	printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL);" prints the
	 * back trace for this cpu.
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 4) == 0))
			printk("\n ");
		printk("%08lx ", *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}
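
/*
 * Dump the register state and, for kernel-mode faults, the stack and
 * the code bytes around the faulting PC (regs->bpc).
 */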
static void show_registers(struct pt_regs *regs)
{
	int i = 0;
	int in_kernel = 1;
	unsigned long sp;

	printk("CPU: %d\n", smp_processor_id());
	show_regs(regs);

	sp = (unsigned long) (1+regs);
	if (user_mode(regs)) {
		in_kernel = 0;
		sp = regs->spu;
		printk("SPU: %08lx\n", sp);
	} else {
		printk("SPI: %08lx\n", sp);
	}
	printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
		current->comm, task_pid_nr(current), 0xffff & i,
		4096+(unsigned long)current);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("\nStack: ");
		show_stack(current, (unsigned long*) sp);

		printk("\nCode: ");
		if (regs->bpc < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char*)regs->bpc)[i])) {
bad:
				printk(" Bad PC value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_registers(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char * str,
	struct pt_regs * regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
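
/*
 * Common trap handling: in user mode deliver the requested signal to
 * the current task; in kernel mode try an exception fixup and die()
 * if there is none.
 */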
static __inline__ void do_trap(int trapnr, int signr, const char * str,
	struct pt_regs * regs, long error_code, siginfo_t *info)
{
	if (user_mode(regs)) {
		/* trap_signal */
		struct task_struct *tsk = current;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	} else {
		/* kernel_trap */
		if (!fixup_exception(regs))
			die(str, regs, error_code);
		return;
	}
}
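
/*
 * The DO_ERROR/DO_ERROR_INFO macros below generate the individual trap
 * handlers; DO_ERROR_INFO additionally fills in a siginfo carrying the
 * faulting address.
 */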
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
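/*
 * Fetch the faulting instruction and try to emulate the unaligned
 * access.  User-mode failures (fetch or emulation) kill the offending
 * process with SIGSEGV; a kernel-mode fetch failure calls die().
 */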
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
	mm_segment_t oldfs;
	unsigned long insn;
	int tmp;

	oldfs = get_fs();

	if (user_mode(regs)) {
		local_irq_enable();
		current->thread.error_code = error_code;
		current->thread.trap_no = 0x17;

		set_fs(USER_DS);
		if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
			set_fs(oldfs);
			goto uspace_segv;
		}
		tmp = handle_unaligned_access(insn, regs);
		set_fs(oldfs);

		if (!tmp)
			return;

	uspace_segv:
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
			"access\n", current->comm);
		force_sig(SIGSEGV, current);
	} else {
		set_fs(KERNEL_DS);
		if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}
		handle_unaligned_access(insn, regs);
		set_fs(oldfs);
	}
}