stack.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0	/* Backtrace still ongoing */
#define KBT_DONE	1	/* Backtrace cleanly completed */
#define KBT_RUNNING	2	/* Can't run backtrace on a running task */
#define KBT_LOOP	3	/* Backtrace entered a loop */
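
/*
 * Lifecycle of kbt->end, summarizing the code below: init() leaves it
 * at KBT_ONGOING (or KBT_DONE if there are no frames at all, or
 * KBT_RUNNING if asked about a runnable task with no saved registers),
 * and next() moves it to KBT_DONE when the walk runs out of frames, or
 * to KBT_LOOP if a frame repeats itself.
 */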

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;

	if (kstack_base == 0)	/* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

#ifdef CONFIG_64BIT
	/* Find the real l1_pgtable by looking in the l0_pgtable. */
	pte = l1_pgtable[HV_L0_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}
	page = pfn_to_page(pfn);
	BUG_ON(PageHighMem(page));	/* No HIGHMEM on 64-bit. */
	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
#endif

	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!valid_address(kbt, address)) {
		return 0;	/* invalid user-space address */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}
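
/*
 * Note on the callback contract (summarizing the checks above, not new
 * mechanism): the backtracer calls read_memory_func() for each word it
 * fetches, so the policy (kernel text is readable, other kernel
 * addresses only within this task's stack, user addresses only if
 * mapped present and readable) is enforced on every read.  Both call
 * sites in this file wire it up the same way:
 *
 *	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
 */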

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
	const char *fault = NULL;	/* happy compiler */
	char fault_buf[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE - 1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {	/* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (EX1_PL(p->ex1) == USER_PL &&
		   p->pc < PAGE_OFFSET &&
		   p->sp < PAGE_OFFSET) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else {
		/*
		 * Not a recognizable fault or syscall frame; reject it
		 * unconditionally (not just when verbose) so we never
		 * dereference a frame we already decided was bogus.
		 */
		if (kbt->verbose)
			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
			       p->pc, p->sp, p->ex1);
		return NULL;
	}
	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
	return NULL;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(unsigned long pc)
{
	return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt)
{
	BacktraceIterator *b = &kbt->it;

	if (is_sigreturn(b->pc)) {
		struct rt_sigframe *frame;
		unsigned long sigframe_top =
			b->sp + sizeof(struct rt_sigframe) - 1;

		if (!valid_address(kbt, b->sp) ||
		    !valid_address(kbt, sigframe_top)) {
			if (kbt->verbose)
				pr_err("  (odd signal: sp %#lx?)\n",
				       (unsigned long)(b->sp));
			return NULL;
		}
		frame = (struct rt_sigframe *)b->sp;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       frame->info.si_signo);
		}
		return (struct pt_regs *)&frame->uc.uc_mcontext;
	}
	return NULL;
}

static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
		struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help debugging,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	} else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}
}

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range, and if we're
	 * asking about the current process we grab the page table
	 * so we can check user accesses before trying to read them.
	 * We flush the TLB to avoid any weird skew issues.
	 */
	is_current = (t == NULL);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->pgtable = NULL;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 0;
	if (is_current) {
		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;

		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
			/*
			 * Not just an optimization: this also allows
			 * this to work at all before va/pa mappings
			 * are set up.
			 */
			kbt->pgtable = swapper_pg_dir;
		} else {
			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));

			if (!PageHighMem(page))
				kbt->pgtable = __va(pgdir_pa);
			else
				pr_err("page table not in LOWMEM (%#llx)\n",
				       pgdir_pa);
		}
		local_flush_tlb_all();
		validate_stack(regs);
	}

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;

	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);
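
/*
 * The three exported calls above form the iteration API.  A minimal
 * walk of the current task's stack (a sketch, mirroring the loop in
 * tile_show_stack() below) looks like:
 *
 *	struct KBacktraceIterator kbt;
 *
 *	KBacktraceIterator_init_current(&kbt);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		pr_err("  pc %#lx, sp %#lx\n", kbt.it.pc, kbt.it.sp);
 */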

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;

	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		pr_err("\n");
		pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char *modname;
		const char *name;
		unsigned long address = kbt->it.pc;
		unsigned long offset, size;
		char namebuf[KSYM_NAME_LEN+100];

		if (address >= PAGE_OFFSET)
			name = kallsyms_lookup(address, &size, &offset,
					       &modname, namebuf);
		else
			name = NULL;
		if (!name)
			namebuf[0] = '\0';
		else {
			size_t namelen = strlen(namebuf);
			size_t remaining = (sizeof(namebuf) - 1) - namelen;
			char *p = namebuf + namelen;
			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
					  offset, size);
			if (modname && rc < remaining)
				snprintf(p + rc, remaining - rc,
					 "[%s] ", modname);
			namebuf[sizeof(namebuf)-1] = '\0';
		}
		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
		if (i >= 100) {
			pr_err("Stack dump truncated (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);

/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;

	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;

	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;

	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}

#endif
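
/*
 * Sketch of a generic caller of the stacktrace API above; the entries
 * buffer and its size are the caller's choice, not anything defined in
 * this file.  The struct fields used here (entries, max_entries, skip,
 * nr_entries) are the same ones save_stack_trace_tsk() reads and writes:
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.max_entries = ARRAY_SIZE(entries),
 *		.entries = entries,
 *	};
 *
 *	save_stack_trace(&trace);
 *	(trace.nr_entries program counters are now in entries[].)
 */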

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);