@@ -28,76 +28,109 @@ static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
                 [DEBUG_STACK - 1] = DEBUG_STKSZ
 };
 
-static unsigned long *in_exception_stack(unsigned long stack, unsigned *usedp,
-                                         char **idp)
+void stack_type_str(enum stack_type type, const char **begin, const char **end)
 {
-        unsigned long begin, end;
+        BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+
+        switch (type) {
+        case STACK_TYPE_IRQ:
+                *begin = "IRQ";
+                *end = "EOI";
+                break;
+        case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:
+                *begin = exception_stack_names[type - STACK_TYPE_EXCEPTION];
+                *end = "EOE";
+                break;
+        default:
+                *begin = NULL;
+                *end = NULL;
+        }
+}
+
+static bool in_exception_stack(unsigned long *stack, struct stack_info *info,
+                               unsigned long *visit_mask)
+{
+        unsigned long *begin, *end;
+        struct pt_regs *regs;
         unsigned k;
 
         BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
 
         for (k = 0; k < N_EXCEPTION_STACKS; k++) {
-                end = raw_cpu_ptr(&orig_ist)->ist[k];
-                begin = end - exception_stack_sizes[k];
+                end = (unsigned long *)raw_cpu_ptr(&orig_ist)->ist[k];
+                begin = end - (exception_stack_sizes[k] / sizeof(long));
+                regs = (struct pt_regs *)end - 1;
 
                 if (stack < begin || stack >= end)
                         continue;
 
                 /*
-                 * Make sure we only iterate through an exception stack once.
-                 * If it comes up for the second time then there's something
-                 * wrong going on - just break and return NULL:
+                 * Make sure we don't iterate through an exception stack more
+                 * than once. If it comes up a second time then there's
+                 * something wrong going on - just break out and report an
+                 * unknown stack type.
                  */
-                if (*usedp & (1U << k))
+                if (*visit_mask & (1U << k))
                         break;
-                *usedp |= 1U << k;
+                *visit_mask |= 1U << k;
 
-                *idp = exception_stack_names[k];
-                return (unsigned long *)end;
+                info->type = STACK_TYPE_EXCEPTION + k;
+                info->begin = begin;
+                info->end = end;
+                info->next_sp = (unsigned long *)regs->sp;
+
+                return true;
         }
 
-        return NULL;
+        return false;
 }
 
-static inline int
-in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
-             unsigned long *irq_stack_end)
+static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
-        return (stack >= irq_stack && stack < irq_stack_end);
-}
+        unsigned long *end = (unsigned long *)this_cpu_read(irq_stack_ptr);
+        unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
 
-enum stack_type {
-        STACK_IS_UNKNOWN,
-        STACK_IS_NORMAL,
-        STACK_IS_EXCEPTION,
-        STACK_IS_IRQ,
-};
+        if (stack < begin || stack >= end)
+                return false;
+
+        info->type = STACK_TYPE_IRQ;
+        info->begin = begin;
+        info->end = end;
+
+        /*
+         * The next stack pointer is the first thing pushed by the entry code
+         * after switching to the irq stack.
+         */
+        info->next_sp = (unsigned long *)*(end - 1);
+
+        return true;
+}
 
-static enum stack_type
-analyze_stack(struct task_struct *task, unsigned long *stack,
-              unsigned long **stack_end, unsigned long *irq_stack,
-              unsigned *used, char **id)
+int get_stack_info(unsigned long *stack, struct task_struct *task,
+                   struct stack_info *info, unsigned long *visit_mask)
 {
-        unsigned long addr;
+        if (!stack)
+                goto unknown;
 
-        addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-        if ((unsigned long)task_stack_page(task) == addr)
-                return STACK_IS_NORMAL;
+        task = task ? : current;
+
+        if (in_task_stack(stack, task, info))
+                return 0;
 
-        *stack_end = in_exception_stack((unsigned long)stack, used, id);
-        if (*stack_end)
-                return STACK_IS_EXCEPTION;
+        if (task != current)
+                goto unknown;
 
-        if (!irq_stack)
-                return STACK_IS_NORMAL;
+        if (in_exception_stack(stack, info, visit_mask))
+                return 0;
 
-        *stack_end = irq_stack;
-        irq_stack -= (IRQ_STACK_SIZE / sizeof(long));
+        if (in_irq_stack(stack, info))
+                return 0;
 
-        if (in_irq_stack(stack, irq_stack, *stack_end))
-                return STACK_IS_IRQ;
+        goto unknown;
 
-        return STACK_IS_UNKNOWN;
+unknown:
+        info->type = STACK_TYPE_UNKNOWN;
+        return -EINVAL;
 }
 
 /*
@@ -111,8 +144,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                 unsigned long *stack, unsigned long bp,
                 const struct stacktrace_ops *ops, void *data)
 {
-        unsigned long *irq_stack = (unsigned long *)this_cpu_read(irq_stack_ptr);
-        unsigned used = 0;
+        unsigned long visit_mask = 0;
+        struct stack_info info;
         int graph = 0;
         int done = 0;
 
@@ -126,57 +159,37 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
          * exceptions
          */
         while (!done) {
-                unsigned long *stack_end;
-                enum stack_type stype;
-                char *id;
+                const char *begin_str, *end_str;
 
-                stype = analyze_stack(task, stack, &stack_end, irq_stack, &used,
-                                      &id);
+                get_stack_info(stack, task, &info, &visit_mask);
 
                 /* Default finish unless specified to continue */
                 done = 1;
 
-                switch (stype) {
+                switch (info.type) {
 
                 /* Break out early if we are on the thread stack */
-                case STACK_IS_NORMAL:
+                case STACK_TYPE_TASK:
                         break;
 
-                case STACK_IS_EXCEPTION:
+                case STACK_TYPE_IRQ:
+                case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:
+
+                        stack_type_str(info.type, &begin_str, &end_str);
 
-                        if (ops->stack(data, id) < 0)
+                        if (ops->stack(data, begin_str) < 0)
                                 break;
 
                         bp = ops->walk_stack(task, stack, bp, ops,
-                                             data, stack_end, &graph);
-                        ops->stack(data, "EOE");
-                        /*
-                         * We link to the next stack via the
-                         * second-to-last pointer (index -2 to end) in the
-                         * exception stack:
-                         */
-                        stack = (unsigned long *) stack_end[-2];
-                        done = 0;
-                        break;
+                                             data, &info, &graph);
 
-                case STACK_IS_IRQ:
+                        ops->stack(data, end_str);
 
-                        if (ops->stack(data, "IRQ") < 0)
-                                break;
-                        bp = ops->walk_stack(task, stack, bp,
-                                ops, data, stack_end, &graph);
-                        /*
-                         * We link to the next stack (which would be
-                         * the process stack normally) the last
-                         * pointer (index -1 to end) in the IRQ stack:
-                         */
-                        stack = (unsigned long *) (stack_end[-1]);
-                        irq_stack = NULL;
-                        ops->stack(data, "EOI");
+                        stack = info.next_sp;
                         done = 0;
                         break;
 
-                case STACK_IS_UNKNOWN:
+                default:
                         ops->stack(data, "UNK");
                         break;
                 }
@@ -185,7 +198,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         /*
          * This handles the process stack:
          */
-        bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
+        bp = ops->walk_stack(task, stack, bp, ops, data, &info, &graph);
 }
 EXPORT_SYMBOL(dump_trace);
 
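
For reviewers, a minimal sketch of how a caller other than dump_trace() might consume the new interface, hopping across stacks the same way the loop above does. It assumes the stack_info / get_stack_info() / stack_type_str() definitions added by this patch plus existing kernel helpers (printk(), __kernel_text_address()); print_stack_chain() is a hypothetical name used only for illustration, not part of the patch:

/*
 * Sketch only: print every stack the current task is running on,
 * following the chain from stack to stack via info.next_sp.
 */
static void print_stack_chain(unsigned long *stack)
{
        unsigned long visit_mask = 0;
        struct stack_info info;
        const char *begin_str, *end_str;

        while (!get_stack_info(stack, current, &info, &visit_mask)) {
                stack_type_str(info.type, &begin_str, &end_str);
                if (begin_str)
                        printk("<%s>\n", begin_str);

                /*
                 * Anything on the stack that points into kernel text is
                 * treated as a potential return address; info.end bounds
                 * the scan for this stack.
                 */
                for (; stack < info.end; stack++)
                        if (__kernel_text_address(*stack))
                                printk(" %pB\n", (void *)*stack);

                if (end_str)
                        printk("<%s>\n", end_str);

                /* The task stack is the last link in the chain. */
                if (info.type == STACK_TYPE_TASK)
                        break;

                stack = info.next_sp;
        }
}

The point of the sketch: all next-stack linkage now comes from info.next_sp, so callers no longer need the per-stack-type stack_end[-1] / stack_end[-2] special cases that dump_trace() used to carry.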