traps.c 9.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2005-2017 Andes Technology Corporation
  3. #include <linux/module.h>
  4. #include <linux/personality.h>
  5. #include <linux/kallsyms.h>
  6. #include <linux/hardirq.h>
  7. #include <linux/kdebug.h>
  8. #include <linux/sched/task_stack.h>
  9. #include <linux/uaccess.h>
  10. #include <linux/ftrace.h>
  11. #include <asm/proc-fns.h>
  12. #include <asm/unistd.h>
  13. #include <linux/ptrace.h>
  14. #include <nds32_intrinsic.h>
  15. extern void show_pte(struct mm_struct *mm, unsigned long addr);
/*
 * Dump out the contents of some memory nicely...
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space. Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

	/* Walk the range in 32-byte rows, aligned down to a 32-byte boundary. */
	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		/* 8 slots of " 12345678" (9 chars each) plus terminating NUL */
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			/* Slots below 'bottom' in the first row stay blank. */
			if (p >= bottom && p < top) {
				unsigned long val;

				/* "????????" marks words __get_user faulted on. */
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		/* Only the low 16 bits of the row address are printed. */
		pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}

EXPORT_SYMBOL(dump_mem);
/* Dump the instruction words around the faulting PC. */
static void dump_instr(struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	mm_segment_t fs;
	/* 5 words of "00000000 ", parens around the PC word, and a NUL */
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * NOTE(review): this unconditional return makes everything below
	 * unreachable — the instruction dump is effectively disabled.
	 * Presumably intentional (stubbed out on this platform), but
	 * confirm; otherwise either drop the return or the dead code.
	 */
	return;
	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space. Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	pr_emerg("Code: ");
	/* Four words before the PC, then the PC word itself in parens. */
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *) addr)[i]);

		if (!bad) {
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		} else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	pr_emerg("Code: %s\n", str);
	set_fs(fs);
}
/* Upper bound on frames/words examined, guards against corrupt stacks. */
#define LOOP_TIMES (100)
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
{
	unsigned long ret_addr;
	int cnt = LOOP_TIMES, graph = 0;

	pr_emerg("Call Trace:\n");
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		/*
		 * No frame pointers: scan the stack word by word and print
		 * every value that looks like a kernel text address.
		 */
		while (!kstack_end(base_reg)) {
			ret_addr = *base_reg++;
			if (__kernel_text_address(ret_addr)) {
				/* Translate ftrace-graph trampoline addresses
				 * back to the real return address. */
				ret_addr = ftrace_graph_ret_addr(
						tsk, &graph, ret_addr, NULL);
				print_ip_sym(ret_addr);
			}
			if (--cnt < 0)
				break;
		}
	} else {
		/*
		 * Frame-pointer walk: LP/FP live at fixed offsets in each
		 * frame. Stop at stack end, a misaligned FP, or an FP below
		 * TASK_SIZE (i.e. not a kernel address).
		 */
		while (!kstack_end((void *)base_reg) &&
		       !((unsigned long)base_reg & 0x3) &&
		       ((unsigned long)base_reg >= TASK_SIZE)) {
			unsigned long next_fp;

			ret_addr = base_reg[LP_OFFSET];
			next_fp = base_reg[FP_OFFSET];
			if (__kernel_text_address(ret_addr)) {
				ret_addr = ftrace_graph_ret_addr(
						tsk, &graph, ret_addr, NULL);
				print_ip_sym(ret_addr);
			}
			if (--cnt < 0)
				break;
			base_reg = (unsigned long *)next_fp;
		}
	}
	pr_emerg("\n");
}
/*
 * Print a call trace for 'tsk' (or the current task when NULL).
 * NOTE(review): the 'sp' parameter is never used — the walk always
 * starts from the task's saved SP/FP or the live register; confirm
 * callers don't expect 'sp' to be honoured.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long *base_reg;

	if (!tsk)
		tsk = current;
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		if (tsk != current)
			/* Sleeping task: use its saved stack pointer. */
			base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
		else
			/* Current task: read $sp straight from the register. */
			__asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
	} else {
		if (tsk != current)
			base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
		else
			__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
	}
	__dump(tsk, base_reg);
	barrier();
}
  134. DEFINE_SPINLOCK(die_lock);
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct task_struct *tsk = current;
	static int die_counter;	/* counts oopses across the system lifetime */

	console_verbose();
	spin_lock_irq(&die_lock);
	/* Allow printk to reach the console even under held spinlocks. */
	bust_spinlocks(1);

	pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
	print_modules();
	pr_emerg("CPU: %i\n", smp_processor_id());
	show_regs(regs);
	pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
		 tsk->comm, tsk->pid, task_thread_info(tsk) + 1);

	/* Dump stack and code only for kernel-mode or in-interrupt faults. */
	if (!user_mode(regs) || in_interrupt()) {
		dump_mem("Stack: ", regs->sp,
			 THREAD_SIZE + (unsigned long)task_thread_info(tsk));
		dump_instr(regs);
		dump_stack();
	}

	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}
  161. EXPORT_SYMBOL(die);
  162. void die_if_kernel(const char *str, struct pt_regs *regs, int err)
  163. {
  164. if (user_mode(regs))
  165. return;
  166. die(str, regs, err);
  167. }
  168. int bad_syscall(int n, struct pt_regs *regs)
  169. {
  170. if (current->personality != PER_LINUX) {
  171. send_sig(SIGSEGV, current, 1);
  172. return regs->uregs[0];
  173. }
  174. force_sig_fault(SIGILL, ILL_ILLTRP,
  175. (void __user *)instruction_pointer(regs) - 4, current);
  176. die_if_kernel("Oops - bad syscall", regs, n);
  177. return regs->uregs[0];
  178. }
  179. void __pte_error(const char *file, int line, unsigned long val)
  180. {
  181. pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
  182. }
  183. void __pmd_error(const char *file, int line, unsigned long val)
  184. {
  185. pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
  186. }
  187. void __pgd_error(const char *file, int line, unsigned long val)
  188. {
  189. pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
  190. }
  191. extern char *exception_vector, *exception_vector_end;
  192. void __init trap_init(void)
  193. {
  194. return;
  195. }
/*
 * Install the exception vector table and program the vector base
 * register, early in boot.
 */
void __init early_trap_init(void)
{
	unsigned long ivb = 0;
	unsigned long base = PAGE_OFFSET;

	/* Copy the exception vectors to the base of kernel memory. */
	memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
	       ((unsigned long)&exception_vector_end -
		(unsigned long)&exception_vector));
	ivb = __nds32__mfsr(NDS32_SR_IVB);
	/* Check platform support. */
	if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
		panic
		    ("IVIC mode is not allowed on the platform with interrupt controller\n");
	/* Select 16-byte vector entries and set the new vector base. */
	__nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
		      IVB_BASE, NDS32_SR_IVB);
	__nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

	/*
	 * 0x800 = 128 vectors * 16byte.
	 * It should be enough to flush a page.
	 */
	cpu_cache_wbinval_page(base, true);
}
  217. void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
  218. int error_code, int si_code)
  219. {
  220. tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
  221. tsk->thread.error_code = error_code;
  222. force_sig_fault(SIGTRAP, si_code,
  223. (void __user *)instruction_pointer(regs), tsk);
  224. }
  225. void do_debug_trap(unsigned long entry, unsigned long addr,
  226. unsigned long type, struct pt_regs *regs)
  227. {
  228. if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
  229. == NOTIFY_STOP)
  230. return;
  231. if (user_mode(regs)) {
  232. /* trap_signal */
  233. send_sigtrap(current, regs, 0, TRAP_BRKPT);
  234. } else {
  235. /* kernel_trap */
  236. if (!fixup_exception(regs))
  237. die("unexpected kernel_trap", regs, 0);
  238. }
  239. }
/* Last-resort handler for an interrupt nobody claimed. */
void unhandled_interruption(struct pt_regs *regs)
{
	pr_emerg("unhandled_interruption\n");
	show_regs(regs);
	/*
	 * Kernel-mode hits terminate the task here; do_exit() does not
	 * return, so force_sig() below only runs for user-mode traps.
	 */
	if (!user_mode(regs))
		do_exit(SIGKILL);
	force_sig(SIGKILL, current);
}
/* Last-resort handler for exceptions with no dedicated handler. */
void unhandled_exceptions(unsigned long entry, unsigned long addr,
			  unsigned long type, struct pt_regs *regs)
{
	pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
		 addr, type);
	show_regs(regs);
	/*
	 * Same pattern as unhandled_interruption(): do_exit() does not
	 * return, so only user-mode tasks reach force_sig().
	 */
	if (!user_mode(regs))
		do_exit(SIGKILL);
	force_sig(SIGKILL, current);
}
  258. extern int do_page_fault(unsigned long entry, unsigned long addr,
  259. unsigned int error_code, struct pt_regs *regs);
  260. /*
  261. * 2:DEF dispatch for TLB MISC exception handler
  262. */
  263. void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
  264. unsigned long type, struct pt_regs *regs)
  265. {
  266. type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
  267. if ((type & ITYPE_mskETYPE) < 5) {
  268. /* Permission exceptions */
  269. do_page_fault(entry, addr, type, regs);
  270. } else
  271. unhandled_exceptions(entry, addr, type, regs);
  272. }
/* Handler for the reserved-instruction exception. */
void do_revinsn(struct pt_regs *regs)
{
	pr_emerg("Reserved Instruction\n");
	show_regs(regs);
	/*
	 * Kernel mode is fatal via do_exit(), which does not return;
	 * user-mode offenders get SIGILL instead.
	 */
	if (!user_mode(regs))
		do_exit(SIGILL);
	force_sig(SIGILL, current);
}
  281. #ifdef CONFIG_ALIGNMENT_TRAP
  282. extern int unalign_access_mode;
  283. extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
  284. #endif
/*
 * General exception dispatcher: routes alignment checks, reserved
 * instructions and the EDM debugging trap to their handlers, falling
 * back to unhandled_exceptions() for everything else.
 */
void do_dispatch_general(unsigned long entry, unsigned long addr,
			 unsigned long itype, struct pt_regs *regs,
			 unsigned long oipc)
{
	unsigned int swid = itype >> ITYPE_offSWID;
	unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);
	if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
		/* Alignment check: try software emulation for user mode. */
		if (user_mode(regs) && unalign_access_mode) {
			int ret;
			ret = do_unaligned_access(addr, regs);

			/* Emulated successfully — nothing more to do. */
			if (ret == 0)
				return;

			if (ret == -EFAULT)
				pr_emerg
				    ("Unhandled unaligned access exception\n");
		}
#endif
		/* Fall through to the page-fault path otherwise. */
		do_page_fault(entry, addr, type, regs);
	} else if (type == ETYPE_RESERVED_INSTRUCTION) {
		/* Reserved instruction */
		do_revinsn(regs);
	} else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
		/* trap, used on v3 EDM target debugging workaround */
		/*
		 * DIPC(OIPC) is passed as parameter before
		 * interrupt is enabled, so the DIPC will not be corrupted
		 * even though interrupts are coming in
		 */
		/*
		 * 1. update ipc
		 * 2. update pt_regs ipc with oipc
		 * 3. update pt_regs ipsw (clear DEX)
		 */
		__asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
		regs->ipc = oipc;
		/* pipsw DEX set means a debug exception was already live. */
		if (regs->pipsw & PSW_mskDEX) {
			pr_emerg
			    ("Nested Debug exception is possibly happened\n");
			pr_emerg("ipc:%08x pipc:%08x\n",
				 (unsigned int)regs->ipc,
				 (unsigned int)regs->pipc);
		}
		do_debug_trap(entry, addr, itype, regs);
		regs->ipsw &= ~PSW_mskDEX;
	} else
		unhandled_exceptions(entry, addr, type, regs);
}