@@ -104,6 +104,45 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
 	return (stack >= irq_stack && stack < irq_stack_end);
 }
 
+static const unsigned long irq_stack_size =
+	(IRQ_STACK_SIZE - 64) / sizeof(unsigned long);
+
+enum stack_type {
+	STACK_IS_UNKNOWN,
+	STACK_IS_NORMAL,
+	STACK_IS_EXCEPTION,
+	STACK_IS_IRQ,
+};
+
+static enum stack_type
+analyze_stack(int cpu, struct task_struct *task,
+	      unsigned long *stack, unsigned long **stack_end, char **id)
+{
+	unsigned long *irq_stack;
+	unsigned long addr;
+	unsigned used = 0;
+
+	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+	if ((unsigned long)task_stack_page(task) == addr)
+		return STACK_IS_NORMAL;
+
+	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
+					&used, id);
+	if (*stack_end)
+		return STACK_IS_EXCEPTION;
+
+	*stack_end = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+	if (!*stack_end)
+		return STACK_IS_UNKNOWN;
+
+	irq_stack = *stack_end - irq_stack_size;
+
+	if (in_irq_stack(stack, irq_stack, *stack_end))
+		return STACK_IS_IRQ;
+
+	return STACK_IS_UNKNOWN;
+}
+
 /*
  * x86-64 can have up to three kernel stacks:
  * process stack
@@ -116,12 +155,11 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	unsigned long *irq_stack_end =
-		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
-	unsigned used = 0;
 	struct thread_info *tinfo;
-	int graph = 0;
+	unsigned long *irq_stack;
 	unsigned long dummy;
+	int graph = 0;
+	int done = 0;
 
 	if (!task)
 		task = current;
@@ -143,49 +181,60 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	 * exceptions
 	 */
 	tinfo = task_thread_info(task);
-	for (;;) {
+	while (!done) {
+		unsigned long *stack_end;
+		enum stack_type stype;
 		char *id;
-		unsigned long *estack_end;
-		estack_end = in_exception_stack(cpu, (unsigned long)stack,
-						&used, &id);
 
-		if (estack_end) {
+		stype = analyze_stack(cpu, task, stack, &stack_end, &id);
+
+		/* Default finish unless specified to continue */
+		done = 1;
+
+		switch (stype) {
+
+		/* Break out early if we are on the thread stack */
+		case STACK_IS_NORMAL:
+			break;
+
+		case STACK_IS_EXCEPTION:
+
 			if (ops->stack(data, id) < 0)
 				break;
 
 			bp = ops->walk_stack(tinfo, stack, bp, ops,
-					     data, estack_end, &graph);
+					     data, stack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
 			 * We link to the next stack via the
 			 * second-to-last pointer (index -2 to end) in the
 			 * exception stack:
 			 */
-			stack = (unsigned long *) estack_end[-2];
-			continue;
-		}
-		if (irq_stack_end) {
-			unsigned long *irq_stack;
-			irq_stack = irq_stack_end -
-				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
-
-			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
-				if (ops->stack(data, "IRQ") < 0)
-					break;
-				bp = ops->walk_stack(tinfo, stack, bp,
-					ops, data, irq_stack_end, &graph);
-				/*
-				 * We link to the next stack (which would be
-				 * the process stack normally) the last
-				 * pointer (index -1 to end) in the IRQ stack:
-				 */
-				stack = (unsigned long *) (irq_stack_end[-1]);
-				irq_stack_end = NULL;
-				ops->stack(data, "EOI");
-				continue;
-			}
+			stack = (unsigned long *) stack_end[-2];
+			done = 0;
+			break;
+
+		case STACK_IS_IRQ:
+
+			if (ops->stack(data, "IRQ") < 0)
+				break;
+			bp = ops->walk_stack(tinfo, stack, bp,
+				ops, data, stack_end, &graph);
+			/*
+			 * We link to the next stack (which would be
+			 * the process stack normally) the last
+			 * pointer (index -1 to end) in the IRQ stack:
+			 */
+			stack = (unsigned long *) (stack_end[-1]);
+			irq_stack = stack_end - irq_stack_size;
+			ops->stack(data, "EOI");
+			done = 0;
+			break;
+
+		case STACK_IS_UNKNOWN:
+			ops->stack(data, "UNK");
+			break;
 		}
-		break;
 	}
 
 	/*