debug-monitors.c
/*
 * ARMv8 single-step debug support and mdscr context switching.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/stat.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
/* Determine the debug architecture version. */
u8 debug_monitors_arch(void)
{
	return cpuid_feature_extract_unsigned_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
						ID_AA64DFR0_DEBUGVER_SHIFT);
}
/*
 * MDSCR access routines.
 */
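/*
 * mdscr_write() performs the update with debug exceptions masked
 * (local_dbg_save/local_dbg_restore), so no debug exception can be taken on
 * this CPU while MDSCR_EL1 is being rewritten. Both accessors are marked
 * NOKPROBE_SYMBOL as they are used on the single-step path itself.
 */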
static void mdscr_write(u32 mdscr)
{
	unsigned long flags;
	local_dbg_save(flags);
	asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
	local_dbg_restore(flags);
}
NOKPROBE_SYMBOL(mdscr_write);

static u32 mdscr_read(void)
{
	u32 mdscr;
	asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
	return mdscr;
}
NOKPROBE_SYMBOL(mdscr_read);
/*
 * Allow root to disable self-hosted debug from userspace.
 * This is useful if you want to connect an external JTAG debugger.
 */
static bool debug_enabled = true;

static int create_debug_debugfs_entry(void)
{
	debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
	return 0;
}
fs_initcall(create_debug_debugfs_entry);

static int __init early_debug_disable(char *buf)
{
	debug_enabled = false;
	return 0;
}

early_param("nodebugmon", early_debug_disable);
/*
 * Keep track of debug users on each core.
 * The ref counts are per-cpu, so plain ints updated with this_cpu
 * operations suffice.
 */
static DEFINE_PER_CPU(int, mde_ref_count);
static DEFINE_PER_CPU(int, kde_ref_count);
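/*
 * enable_debug_monitors() and disable_debug_monitors() must run with
 * preemption disabled: they maintain the per-cpu reference counts above and
 * set MDSCR_EL1.MDE when the first user appears on this CPU (plus
 * MDSCR_EL1.KDE for the first EL1 user), clearing the bits again only when
 * the last matching reference is dropped.
 */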
void enable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, enable = 0;

	WARN_ON(preemptible());

	if (this_cpu_inc_return(mde_ref_count) == 1)
		enable = DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_inc_return(kde_ref_count) == 1)
		enable |= DBG_MDSCR_KDE;

	if (enable && debug_enabled) {
		mdscr = mdscr_read();
		mdscr |= enable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(enable_debug_monitors);

void disable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, disable = 0;

	WARN_ON(preemptible());

	if (this_cpu_dec_return(mde_ref_count) == 0)
		disable = ~DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_dec_return(kde_ref_count) == 0)
		disable &= ~DBG_MDSCR_KDE;

	if (disable) {
		mdscr = mdscr_read();
		mdscr &= disable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(disable_debug_monitors);
/*
 * OS lock clearing.
 */
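/*
 * Clearing OSLAR_EL1 releases the OS lock so that the debug registers can be
 * written. This is done on every CPU at boot and repeated from the hotplug
 * notifier whenever a CPU comes (back) online.
 */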
static void clear_os_lock(void *unused)
{
	asm volatile("msr oslar_el1, %0" : : "r" (0));
}

static int os_lock_notify(struct notifier_block *self,
			  unsigned long action, void *data)
{
	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
		clear_os_lock(NULL);
	return NOTIFY_OK;
}

static struct notifier_block os_lock_nb = {
	.notifier_call = os_lock_notify,
};

static int debug_monitors_init(void)
{
	cpu_notifier_register_begin();

	/* Clear the OS lock. */
	on_each_cpu(clear_os_lock, NULL, 1);
	isb();

	/* Register hotplug handler. */
	__register_cpu_notifier(&os_lock_nb);

	cpu_notifier_register_done();
	return 0;
}
postcore_initcall(debug_monitors_init);
/*
 * Single step API and exception handling.
 */
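/*
 * With MDSCR_EL1.SS enabled, the SPSR.SS bit in the saved pstate selects the
 * step state restored on exception return: 1 means active-not-pending (one
 * instruction executes before the step exception is taken), 0 means
 * active-pending (the exception is taken before the next instruction).
 */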
static void set_regs_spsr_ss(struct pt_regs *regs)
{
	regs->pstate |= DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(set_regs_spsr_ss);

static void clear_regs_spsr_ss(struct pt_regs *regs)
{
	regs->pstate &= ~DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(clear_regs_spsr_ss);

/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
static DEFINE_SPINLOCK(step_hook_lock);
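/*
 * Hook registration is serialised with step_hook_lock; the exception path
 * walks the list under RCU, which is why unregister_step_hook() waits for a
 * grace period before returning.
 */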
void register_step_hook(struct step_hook *hook)
{
	spin_lock(&step_hook_lock);
	list_add_rcu(&hook->node, &step_hook);
	spin_unlock(&step_hook_lock);
}

void unregister_step_hook(struct step_hook *hook)
{
	spin_lock(&step_hook_lock);
	list_del_rcu(&hook->node);
	spin_unlock(&step_hook_lock);
	synchronize_rcu();
}
/*
 * Call the registered single step handlers.
 * There is no syndrome information to identify which handler owns the step,
 * so call each registered handler in turn until one returns
 * DBG_HOOK_HANDLED.
 */
static int call_step_hook(struct pt_regs *regs, unsigned int esr)
{
	struct step_hook *hook;
	int retval = DBG_HOOK_ERROR;

	rcu_read_lock();

	list_for_each_entry_rcu(hook, &step_hook, node) {
		retval = hook->fn(regs, esr);
		if (retval == DBG_HOOK_HANDLED)
			break;
	}

	rcu_read_unlock();

	return retval;
}
NOKPROBE_SYMBOL(call_step_hook);
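/*
 * Deliver SIGTRAP to the current task, reporting the faulting PC in si_addr.
 * This is only valid for exceptions taken from user mode; if the interrupted
 * context had interrupts enabled, they are re-enabled here before the signal
 * is queued.
 */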
static void send_user_sigtrap(int si_code)
{
	struct pt_regs *regs = current_pt_regs();
	siginfo_t info = {
		.si_signo	= SIGTRAP,
		.si_errno	= 0,
		.si_code	= si_code,
		.si_addr	= (void __user *)instruction_pointer(regs),
	};

	if (WARN_ON(!user_mode(regs)))
		return;

	if (interrupts_enabled(regs))
		local_irq_enable();

	force_sig_info(SIGTRAP, &info, current);
}
static int single_step_handler(unsigned long addr, unsigned int esr,
			       struct pt_regs *regs)
{
	/*
	 * If we are stepping a pending breakpoint, call the hw_breakpoint
	 * handler first.
	 */
	if (!reinstall_suspended_bps(regs))
		return 0;

	if (user_mode(regs)) {
		send_user_sigtrap(TRAP_HWBKPT);

		/*
		 * ptrace will disable single step unless explicitly
		 * asked to re-enable it. For other clients, it makes
		 * sense to leave it enabled (i.e. rewind the controls
		 * to the active-not-pending state).
		 */
		user_rewind_single_step(current);
	} else {
#ifdef CONFIG_KPROBES
		if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
			return 0;
#endif
		if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
			return 0;

		pr_warn("Unexpected kernel single-step exception at EL1\n");
		/*
		 * Re-enable stepping since we know that we will be
		 * returning to regs.
		 */
		set_regs_spsr_ss(regs);
	}

	return 0;
}
NOKPROBE_SYMBOL(single_step_handler);
/*
 * The breakpoint handler is re-entrant, as another breakpoint can be hit
 * from within a breakpoint handler (especially with kprobes). Readers
 * therefore walk the hook list under RCU, while registration and removal
 * are serialised with a spinlock.
 */
static LIST_HEAD(break_hook);
static DEFINE_SPINLOCK(break_hook_lock);

void register_break_hook(struct break_hook *hook)
{
	spin_lock(&break_hook_lock);
	list_add_rcu(&hook->node, &break_hook);
	spin_unlock(&break_hook_lock);
}

void unregister_break_hook(struct break_hook *hook)
{
	spin_lock(&break_hook_lock);
	list_del_rcu(&hook->node);
	spin_unlock(&break_hook_lock);
	synchronize_rcu();
}
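/*
 * Match the syndrome against each hook's esr_mask/esr_val pair and invoke
 * the matching handler; if no hook matches, report DBG_HOOK_ERROR so the
 * caller can treat the BRK as unexpected. When several hooks match, the one
 * found last during the list walk is the one that gets called.
 */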
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
	struct break_hook *hook;
	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(hook, &break_hook, node)
		if ((esr & hook->esr_mask) == hook->esr_val)
			fn = hook->fn;
	rcu_read_unlock();

	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
NOKPROBE_SYMBOL(call_break_hook);

static int brk_handler(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
	if (user_mode(regs)) {
		send_user_sigtrap(TRAP_BRKPT);
	}
#ifdef CONFIG_KPROBES
	else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
		if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED)
			return -EFAULT;
	}
#endif
	else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
		pr_warn("Unexpected kernel BRK exception at EL1\n");
		return -EFAULT;
	}

	return 0;
}
NOKPROBE_SYMBOL(brk_handler);
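/*
 * Handle breakpoint instructions executed by compat (AArch32) tasks:
 * recognise the A32 BKPT encoding as well as the 16-bit Thumb and 32-bit
 * Thumb-2 breakpoint encodings, and deliver SIGTRAP when one is found.
 * Returns -EFAULT if the faulting instruction is not a recognised
 * breakpoint.
 */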
int aarch32_break_handler(struct pt_regs *regs)
{
	u32 arm_instr;
	u16 thumb_instr;
	bool bp = false;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!compat_user_mode(regs))
		return -EFAULT;

	if (compat_thumb_mode(regs)) {
		/* get 16-bit Thumb instruction */
		get_user(thumb_instr, (u16 __user *)pc);
		thumb_instr = le16_to_cpu(thumb_instr);
		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
			/* get second half of 32-bit Thumb-2 instruction */
			get_user(thumb_instr, (u16 __user *)(pc + 2));
			thumb_instr = le16_to_cpu(thumb_instr);
			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
		} else {
			bp = thumb_instr == AARCH32_BREAK_THUMB;
		}
	} else {
		/* 32-bit ARM instruction */
		get_user(arm_instr, (u32 __user *)pc);
		arm_instr = le32_to_cpu(arm_instr);
		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
	}

	if (!bp)
		return -EFAULT;

	send_user_sigtrap(TRAP_BRKPT);
	return 0;
}
NOKPROBE_SYMBOL(aarch32_break_handler);
static int __init debug_traps_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
			      TRAP_HWBKPT, "single-step handler");
	hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
			      TRAP_BRKPT, "ptrace BRK handler");
	return 0;
}
arch_initcall(debug_traps_init);
/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
{
	/*
	 * If single step is active for this thread, then set SPSR.SS
	 * to 1 to avoid returning to the active-pending state.
	 */
	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);
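/*
 * Clear SPSR.SS so that the step state machine is left in the active-pending
 * state: the single-step exception is then taken on return to userspace
 * without executing another instruction.
 */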
void user_fastforward_single_step(struct task_struct *task)
{
	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
		clear_regs_spsr_ss(task_pt_regs(task));
}

/* Kernel API */
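/*
 * The kernel single-step helpers below act on the current CPU's MDSCR_EL1
 * and must be called with interrupts disabled (enforced by the
 * WARN_ON(!irqs_disabled()) checks). kernel_enable_single_step() arms
 * stepping for the exception return to @regs, kernel_disable_single_step()
 * tears it down again, and kernel_active_single_step() reports whether
 * MDSCR_EL1.SS is currently set.
 */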
void kernel_enable_single_step(struct pt_regs *regs)
{
	WARN_ON(!irqs_disabled());
	set_regs_spsr_ss(regs);
	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
	enable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_enable_single_step);

void kernel_disable_single_step(void)
{
	WARN_ON(!irqs_disabled());
	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
	disable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_disable_single_step);

int kernel_active_single_step(void)
{
	WARN_ON(!irqs_disabled());
	return mdscr_read() & DBG_MDSCR_SS;
}
NOKPROBE_SYMBOL(kernel_active_single_step);

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
	set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_enable_single_step);

void user_disable_single_step(struct task_struct *task)
{
	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}
NOKPROBE_SYMBOL(user_disable_single_step);