/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>
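
/*
 * Exception vector entry type names, indexed by the 'reason' argument that
 * bad_mode() below receives from the entry code.
 */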
static const char *handler[] = {
        "Synchronous Abort",
        "IRQ",
        "FIQ",
        "Error"
};

int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where)
{
        printk(" %pS\n", (void *)where);
}
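
/*
 * Dump the instruction stream around the faulting PC: the four words before
 * it plus the word at the PC itself (printed in parentheses).
 */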
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);
}

static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        if (!user_mode(regs)) {
                mm_segment_t fs = get_fs();
                set_fs(KERNEL_DS);
                __dump_instr(lvl, regs);
                set_fs(fs);
        } else {
                __dump_instr(lvl, regs);
        }
}
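
/*
 * Walk and print the kernel call stack for @tsk, starting either from the
 * current frame (tsk == current) or from the context saved at the last
 * __switch_to(). When @regs is supplied, frames above the exception frame
 * are skipped and regs->pc is printed in their place.
 */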
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        int skip;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.pc = (unsigned long)dump_backtrace;
        } else {
                /*
                 * task blocked in __switch_to
                 */
                frame.fp = thread_saved_fp(tsk);
                frame.pc = thread_saved_pc(tsk);
        }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = tsk->curr_ret_stack;
#endif

        skip = !!regs;
        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
                if (!skip) {
                        dump_backtrace_entry(frame.pc);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * Mostly, this is the case where this function is
                         * called from panic/abort. As the exception handler's
                         * stack frame does not contain the pc at which the
                         * exception was taken, use regs->pc instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
        } while (!unwind_frame(tsk, &frame));

        put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"
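
/*
 * Print the oops banner, registers, backtrace and code dump for a fatal
 * kernel exception. Returns NOTIFY_STOP without printing the details if a
 * registered die notifier has already handled the event.
 */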
static int __die(const char *str, int err, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return ret;

        print_modules();
        __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
                 end_of_stack(tsk));

        if (!user_mode(regs)) {
                dump_backtrace(regs, tsk);
                dump_instr(KERN_EMERG, regs);
        }

        return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        int ret;
        unsigned long flags;

        raw_spin_lock_irqsave(&die_lock, flags);

        oops_enter();

        console_verbose();
        bust_spinlocks(1);
        ret = __die(str, err, regs);

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");

        raw_spin_unlock_irqrestore(&die_lock, flags);

        if (ret != NOTIFY_STOP)
                do_exit(SIGSEGV);
}

static bool show_unhandled_signals_ratelimited(void)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        return show_unhandled_signals && __ratelimit(&rs);
}
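
/*
 * Log an "unhandled exception" diagnostic (rate limited, and only when
 * show_unhandled_signals is set) and then deliver the signal described by
 * @info to @tsk.
 */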
void arm64_force_sig_info(struct siginfo *info, const char *str,
                          struct task_struct *tsk)
{
        unsigned int esr = tsk->thread.fault_code;
        struct pt_regs *regs = task_pt_regs(tsk);

        if (!unhandled_signal(tsk, info->si_signo))
                goto send_sig;

        if (!show_unhandled_signals_ratelimited())
                goto send_sig;

        pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
        if (esr)
                pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

        pr_cont("%s", str);
        print_vma_addr(KERN_CONT " in ", regs->pc);
        pr_cont("\n");
        __show_regs(regs);

send_sig:
        force_sig_info(info->si_signo, info, tsk);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
                      struct siginfo *info, int err)
{
        if (user_mode(regs)) {
                WARN_ON(regs != current_pt_regs());
                current->thread.fault_address = 0;
                current->thread.fault_code = err;

                arm64_force_sig_info(info, str, current);
        } else {
                die(str, regs, err);
        }
}

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
        regs->pc += size;

        /*
         * If we were single stepping, we want to get the step exception after
         * we return from the trap.
         */
        if (user_mode(regs))
                user_fastforward_single_step(current);
}

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}
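
/*
 * Read the faulting instruction from user space (handling 16-bit and 32-bit
 * Thumb encodings for compat tasks) and hand it to a registered undef hook
 * whose instruction and PSTATE masks match. Returns the hook's result
 * (0 means the instruction was handled), or 1 if no hook matched.
 */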
static int call_undef_hook(struct pt_regs *regs)
{
        struct undef_hook *hook;
        unsigned long flags;
        u32 instr;
        int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
        void __user *pc = (void __user *)instruction_pointer(regs);

        if (!user_mode(regs))
                return 1;

        if (compat_thumb_mode(regs)) {
                /* 16-bit Thumb instruction */
                __le16 instr_le;
                if (get_user(instr_le, (__le16 __user *)pc))
                        goto exit;
                instr = le16_to_cpu(instr_le);
                if (aarch32_insn_is_wide(instr)) {
                        u32 instr2;

                        if (get_user(instr_le, (__le16 __user *)(pc + 2)))
                                goto exit;
                        instr2 = le16_to_cpu(instr_le);
                        instr = (instr << 16) | instr2;
                }
        } else {
                /* 32-bit ARM instruction */
                __le32 instr_le;
                if (get_user(instr_le, (__le32 __user *)pc))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        }

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
                        fn = hook->fn;
        raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
        return fn ? fn(regs, instr) : 1;
}

void force_signal_inject(int signal, int code, unsigned long address)
{
        siginfo_t info;
        const char *desc;
        struct pt_regs *regs = current_pt_regs();

        clear_siginfo(&info);

        switch (signal) {
        case SIGILL:
                desc = "undefined instruction";
                break;
        case SIGSEGV:
                desc = "illegal memory access";
                break;
        default:
                desc = "unknown or unrecoverable error";
                break;
        }

        /* Force signals we don't understand to SIGKILL */
        if (WARN_ON(signal != SIGKILL &&
                    siginfo_layout(signal, code) != SIL_FAULT)) {
                signal = SIGKILL;
        }

        info.si_signo = signal;
        info.si_errno = 0;
        info.si_code  = code;
        info.si_addr  = (void __user *)address;

        arm64_notify_die(desc, regs, &info, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
        int code;

        down_read(&current->mm->mmap_sem);
        if (find_vma(current->mm, addr) == NULL)
                code = SEGV_MAPERR;
        else
                code = SEGV_ACCERR;
        up_read(&current->mm->mmap_sem);

        force_signal_inject(SIGSEGV, code, addr);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        /* check for AArch32 breakpoint instructions */
        if (!aarch32_break_handler(regs))
                return;

        if (call_undef_hook(regs) == 0)
                return;

        force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        config_sctlr_el1(SCTLR_EL1_UCI, 0);
}
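
/*
 * Run a cache maintenance instruction on a user address, with an exception
 * table fixup so that a fault sets 'res' to -EFAULT instead of oopsing.
 */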
#define __user_cache_maint(insn, address, res)                          \
        if (address >= user_addr_max()) {                               \
                res = -EFAULT;                                          \
        } else {                                                        \
                uaccess_ttbr0_enable();                                 \
                asm volatile (                                          \
                        "1:     " insn ", %1\n"                         \
                        "       mov     %w0, #0\n"                      \
                        "2:\n"                                          \
                        "       .pushsection .fixup,\"ax\"\n"           \
                        "       .align  2\n"                            \
                        "3:     mov     %w0, %w2\n"                     \
                        "       b       2b\n"                           \
                        "       .popsection\n"                          \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "=r" (res)                                    \
                        : "r" (address), "i" (-EFAULT));                \
                uaccess_ttbr0_disable();                                \
        }

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
        unsigned long address;
        int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
        int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
        int ret = 0;

        address = untagged_addr(pt_regs_read_reg(regs, rt));

        switch (crm) {
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:     /* DC CVAU, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:     /* DC CVAC, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:     /* DC CVAP */
                __user_cache_maint("sys 3, c7, c12, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:    /* DC CIVAC */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:     /* IC IVAU */
                __user_cache_maint("ic ivau", address, ret);
                break;
        default:
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
                return;
        }

        if (ret)
                arm64_notify_segfault(address);
        else
                arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
        unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

        pt_regs_write_reg(regs, rt, val);

        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;

        pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;

        pt_regs_write_reg(regs, rt, arch_timer_get_rate());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
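
/*
 * Emulation hooks for EL0 system instructions that trap to EL1: each entry
 * matches the ESR ISS bits of a trapped instruction and supplies a handler
 * that emulates it.
 */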
struct sys64_hook {
        unsigned int esr_mask;
        unsigned int esr_val;
        void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static struct sys64_hook sys64_hooks[] = {
        {
                .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
                .handler = user_cache_maint_handler,
        },
        {
                /* Trap read access to CTR_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
                .handler = ctr_read_handler,
        },
        {
                /* Trap read access to CNTVCT_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
                .handler = cntvct_read_handler,
        },
        {
                /* Trap read access to CNTFRQ_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
                .handler = cntfrq_read_handler,
        },
        {},
};

asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
        struct sys64_hook *hook;

        for (hook = sys64_hooks; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New SYS instructions may previously have been undefined at EL0. Fall
         * back to our usual undefined instruction handler so that we handle
         * these consistently.
         */
        do_undefinstr(regs);
}

long compat_arm_syscall(struct pt_regs *regs);

asmlinkage long do_ni_syscall(struct pt_regs *regs)
{
#ifdef CONFIG_COMPAT
        long ret;
        if (is_compat_task()) {
                ret = compat_arm_syscall(regs);
                if (ret != -ENOSYS)
                        return ret;
        }
#endif

        return sys_ni_syscall();
}
static const char *esr_class_str[] = {
        [0 ... ESR_ELx_EC_MAX]          = "UNRECOGNIZED EC",
        [ESR_ELx_EC_UNKNOWN]            = "Unknown/Uncategorized",
        [ESR_ELx_EC_WFx]                = "WFI/WFE",
        [ESR_ELx_EC_CP15_32]            = "CP15 MCR/MRC",
        [ESR_ELx_EC_CP15_64]            = "CP15 MCRR/MRRC",
        [ESR_ELx_EC_CP14_MR]            = "CP14 MCR/MRC",
        [ESR_ELx_EC_CP14_LS]            = "CP14 LDC/STC",
        [ESR_ELx_EC_FP_ASIMD]           = "ASIMD",
        [ESR_ELx_EC_CP10_ID]            = "CP10 MRC/VMRS",
        [ESR_ELx_EC_CP14_64]            = "CP14 MCRR/MRRC",
        [ESR_ELx_EC_ILL]                = "PSTATE.IL",
        [ESR_ELx_EC_SVC32]              = "SVC (AArch32)",
        [ESR_ELx_EC_HVC32]              = "HVC (AArch32)",
        [ESR_ELx_EC_SMC32]              = "SMC (AArch32)",
        [ESR_ELx_EC_SVC64]              = "SVC (AArch64)",
        [ESR_ELx_EC_HVC64]              = "HVC (AArch64)",
        [ESR_ELx_EC_SMC64]              = "SMC (AArch64)",
        [ESR_ELx_EC_SYS64]              = "MSR/MRS (AArch64)",
        [ESR_ELx_EC_SVE]                = "SVE",
        [ESR_ELx_EC_IMP_DEF]            = "EL3 IMP DEF",
        [ESR_ELx_EC_IABT_LOW]           = "IABT (lower EL)",
        [ESR_ELx_EC_IABT_CUR]           = "IABT (current EL)",
        [ESR_ELx_EC_PC_ALIGN]           = "PC Alignment",
        [ESR_ELx_EC_DABT_LOW]           = "DABT (lower EL)",
        [ESR_ELx_EC_DABT_CUR]           = "DABT (current EL)",
        [ESR_ELx_EC_SP_ALIGN]           = "SP Alignment",
        [ESR_ELx_EC_FP_EXC32]           = "FP (AArch32)",
        [ESR_ELx_EC_FP_EXC64]           = "FP (AArch64)",
        [ESR_ELx_EC_SERROR]             = "SError",
        [ESR_ELx_EC_BREAKPT_LOW]        = "Breakpoint (lower EL)",
        [ESR_ELx_EC_BREAKPT_CUR]        = "Breakpoint (current EL)",
        [ESR_ELx_EC_SOFTSTP_LOW]        = "Software Step (lower EL)",
        [ESR_ELx_EC_SOFTSTP_CUR]        = "Software Step (current EL)",
        [ESR_ELx_EC_WATCHPT_LOW]        = "Watchpoint (lower EL)",
        [ESR_ELx_EC_WATCHPT_CUR]        = "Watchpoint (current EL)",
        [ESR_ELx_EC_BKPT32]             = "BKPT (AArch32)",
        [ESR_ELx_EC_VECTOR32]           = "Vector catch (AArch32)",
        [ESR_ELx_EC_BRK64]              = "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
        return esr_class_str[ESR_ELx_EC(esr)];
}
/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));

        die("Oops - bad mode", regs, 0);
        local_daif_mask();
        panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
        siginfo_t info;
        void __user *pc = (void __user *)instruction_pointer(regs);

        clear_siginfo(&info);
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;

        current->thread.fault_address = 0;
        current->thread.fault_code = esr;

        arm64_force_sig_info(&info, "Bad EL0 synchronous exception", current);
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);
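
/*
 * Report a kernel stack overflow. The entry code is expected to have switched
 * to the per-CPU overflow_stack defined above before calling this; we print
 * the bounds of each candidate stack and then panic.
 */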
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
        unsigned int esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        console_verbose();
        pr_emerg("Insufficient stack space to handle exception!");

        pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
        pr_emerg("FAR: 0x%016lx\n", far);

        pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
                 tsk_stk, tsk_stk + THREAD_SIZE);
        pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
                 irq_stk, irq_stk + THREAD_SIZE);
        pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
                 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

        __show_regs(regs);

        /*
         * We use nmi_panic to limit the potential for recursive overflows,
         * and to get a better stack trace.
         */
        nmi_panic(NULL, "kernel stack overflow");
        cpu_park_loop();
}
#endif
void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
        console_verbose();

        pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
                smp_processor_id(), esr, esr_get_class_string(esr));
        if (regs)
                __show_regs(regs);

        nmi_panic(regs, "Asynchronous SError Interrupt");

        cpu_park_loop();
        unreachable();
}
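
/*
 * Classify a RAS SError by its severity (AET) field: corrected and
 * restartable errors are survivable, unrecoverable ones are fatal, and
 * uncontainable or uncategorized errors panic immediately.
 */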
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
        u32 aet = arm64_ras_serror_get_severity(esr);

        switch (aet) {
        case ESR_ELx_AET_CE:    /* corrected error */
        case ESR_ELx_AET_UEO:   /* restartable, not yet consumed */
                /*
                 * The CPU can make progress. We may take UEO again as
                 * a more severe error.
                 */
                return false;

        case ESR_ELx_AET_UEU:   /* Uncorrected Unrecoverable */
        case ESR_ELx_AET_UER:   /* Uncorrected Recoverable */
                /*
                 * The CPU can't make progress. The exception may have
                 * been imprecise.
                 */
                return true;

        case ESR_ELx_AET_UC:    /* Uncontainable or Uncategorized error */
        default:
                /* Error has been silently propagated */
                arm64_serror_panic(regs, esr);
        }
}

asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
        nmi_enter();

        /* non-RAS errors are not containable */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
                arm64_serror_panic(regs, esr);

        nmi_exit();
}

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}
/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
        /*
         * bug_handler() is only called for BRK #BUG_BRK_IMM, so the answer
         * is trivial: any spurious instances with no bug table entry will be
         * rejected by report_bug() and passed back to the debug-monitors
         * code, which handles them as a fatal unexpected debug exception.
         */
        return 1;
}
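
/*
 * BRK handler for BUG()/WARN(): dispatch through report_bug(); die on a real
 * BUG, skip over the BRK instruction on a WARN, and report anything else as
 * an unhandled debug trap.
 */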
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
        if (user_mode(regs))
                return DBG_HOOK_ERROR;

        switch (report_bug(regs->pc, regs)) {
        case BUG_TRAP_TYPE_BUG:
                die("Oops - BUG", regs, 0);
                break;

        case BUG_TRAP_TYPE_WARN:
                break;

        default:
                /* unknown/unrecognised bug trap type */
                return DBG_HOOK_ERROR;
        }

        /* If thread survives, skip over the BUG instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
        .esr_val = 0xf2000000 | BUG_BRK_IMM,
        .esr_mask = 0xffffffff,
        .fn = bug_handler,
};
/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
                       struct pt_regs *regs)
{
        return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
        register_break_hook(&bug_break_hook);
}