traps.c

/*
 *  linux/arch/m32r/kernel/traps.c
 *
 *  Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
 *                            Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/cpu.h>

#include <asm/page.h>
#include <asm/processor.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>

#include <asm/smp.h>

#include <linux/module.h>

asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * Boot vector for the application processors (APs); _AP_EI branches
 * to startup_AP.
 */
asm (
        "       .section .eit_vector4,\"ax\"    \n"
        "       .global _AP_RE                  \n"
        "       .global startup_AP              \n"
        "_AP_RE:                                \n"
        "       .fill 32, 4, 0                  \n"
        "_AP_EI: bra startup_AP                 \n"
        "       .previous                       \n"
);
#endif /* CONFIG_SMP */
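
/*
 * Each EIT (Exception, Interrupt, Trap) vector entry holds a 32-bit
 * m32r instruction word.  BRA_INSN() builds a "bra" instruction
 * (opcode 0xff in the top byte, 24-bit pc-relative displacement in
 * words) that branches from vector entry 'entry' to the handler 'func'.
 */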
extern unsigned long eit_vector[];
#define BRA_INSN(func, entry)   \
        ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
        + 0xff000000UL

static void set_eit_vector_entries(void)
{
        extern void default_eit_handler(void);
        extern void system_call(void);
        extern void pie_handler(void);
        extern void ace_handler(void);
        extern void tme_handler(void);
        extern void _flush_cache_copyback_all(void);

        eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
        eit_vector[1] = BRA_INSN(default_eit_handler, 1);
        eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
        eit_vector[5] = BRA_INSN(default_eit_handler, 5);
        eit_vector[8] = BRA_INSN(rie_handler, 8);
        eit_vector[12] = BRA_INSN(alignment_check, 12);
        eit_vector[16] = BRA_INSN(ill_trap, 16);
        eit_vector[17] = BRA_INSN(debug_trap, 17);
        eit_vector[18] = BRA_INSN(system_call, 18);
        eit_vector[19] = BRA_INSN(ill_trap, 19);
        eit_vector[20] = BRA_INSN(ill_trap, 20);
        eit_vector[21] = BRA_INSN(ill_trap, 21);
        eit_vector[22] = BRA_INSN(ill_trap, 22);
        eit_vector[23] = BRA_INSN(ill_trap, 23);
        eit_vector[24] = BRA_INSN(ill_trap, 24);
        eit_vector[25] = BRA_INSN(ill_trap, 25);
        eit_vector[26] = BRA_INSN(ill_trap, 26);
        eit_vector[27] = BRA_INSN(ill_trap, 27);
        eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
        eit_vector[29] = BRA_INSN(ill_trap, 29);
        eit_vector[30] = BRA_INSN(ill_trap, 30);
        eit_vector[31] = BRA_INSN(ill_trap, 31);
        eit_vector[32] = BRA_INSN(ei_handler, 32);
        eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
        eit_vector[68] = BRA_INSN(ace_handler, 68);
        eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
        eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
        eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
        eit_vector[186] = (unsigned long)smp_call_function_interrupt;
        eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
        eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
        eit_vector[189] = 0;    /* CPU_BOOT_IPI */
        eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
        eit_vector[191] = 0;
#endif
        _flush_cache_copyback_all();
}

void __init trap_init(void)
{
        set_eit_vector_entries();

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

static int kstack_depth_to_print = 24;
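
/*
 * Best-effort backtrace: scan the stack for values that look like
 * kernel text addresses and print them.  No frame pointers are
 * followed, so stale return addresses may also show up.
 */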
static void show_trace(struct task_struct *task, unsigned long *stack)
{
        unsigned long addr;

        if (!stack)
                stack = (unsigned long *)&stack;

        printk("Call Trace: ");
        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr))
                        printk("[<%08lx>] %pSR\n", addr, (void *)addr);
        }
        printk("\n");
}
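
/*
 * Dump the raw stack contents (up to kstack_depth_to_print words,
 * four per line) and then the call trace derived from them.
 */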
void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        /*
         * debugging aid: "show_stack(NULL);" prints the
         * back trace for this cpu.
         */
        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 4) == 0))
                        printk("\n ");
                printk("%08lx ", *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}
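
/*
 * Print the CPU number, the register set and the active stack pointer
 * (SPU for a user-mode fault, SPI otherwise).  For a fault taken in
 * kernel mode, also dump the stack and the code bytes at the faulting
 * PC.
 */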
static void show_registers(struct pt_regs *regs)
{
        int i = 0;
        int in_kernel = 1;
        unsigned long sp;

        printk("CPU: %d\n", smp_processor_id());
        show_regs(regs);

        sp = (unsigned long)(1 + regs);
        if (user_mode(regs)) {
                in_kernel = 0;
                sp = regs->spu;
                printk("SPU: %08lx\n", sp);
        } else {
                printk("SPI: %08lx\n", sp);
        }
        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
                current->comm, task_pid_nr(current), 0xffff & i,
                4096 + (unsigned long)current);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("\nStack: ");
                show_stack(current, (unsigned long *)sp);

                printk("\nCode: ");
                if (regs->bpc < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;

                        if (__get_user(c, &((unsigned char *)regs->bpc)[i])) {
bad:
                                printk(" Bad PC value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

static DEFINE_SPINLOCK(die_lock);
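
/*
 * Terminal error path: serialize on die_lock, print the trap name,
 * error code and register state, then kill the current task with
 * SIGSEGV.
 */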
void die(const char *str, struct pt_regs *regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_registers(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char *str,
        struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}
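
/*
 * Common trap dispatcher: a fault in user mode is turned into a signal
 * for the current task (with siginfo when provided); a fault in kernel
 * mode first tries an exception-table fixup and only then calls die().
 */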
static __inline__ void do_trap(int trapnr, int signr, const char *str,
        struct pt_regs *regs, long error_code, siginfo_t *info)
{
        if (user_mode(regs)) {
                /* trap_signal */
                struct task_struct *tsk = current;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}
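
/*
 * C-level trap entry points (do_debug_trap, do_rie_handler,
 * do_pie_handler, do_ill_trap) generated by the macros above; they are
 * reached through the corresponding low-level handlers installed in
 * the EIT vector.
 */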
DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
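/*
 * Fetch the faulting instruction with the right address space selected
 * via set_fs() and let handle_unaligned_access() emulate it; a user
 * task gets SIGSEGV if the fixup fails, while an unreadable kernel PC
 * ends in die().
 */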
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
        mm_segment_t oldfs;
        unsigned long insn;
        int tmp;

        oldfs = get_fs();

        if (user_mode(regs)) {
                local_irq_enable();
                current->thread.error_code = error_code;
                current->thread.trap_no = 0x17;

                set_fs(USER_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                tmp = handle_unaligned_access(insn, regs);
                set_fs(oldfs);

                if (!tmp)
                        return;

uspace_segv:
                printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
                        "access\n", current->comm);
                force_sig(SIGSEGV, current);
        } else {
                set_fs(KERNEL_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }
                handle_unaligned_access(insn, regs);
                set_fs(oldfs);
        }
}