@@ -32,6 +32,21 @@ struct stackframe {
 #endif
 };
 
+enum stack_type {
+	STACK_TYPE_UNKNOWN,
+	STACK_TYPE_TASK,
+	STACK_TYPE_IRQ,
+	STACK_TYPE_OVERFLOW,
+	STACK_TYPE_SDEI_NORMAL,
+	STACK_TYPE_SDEI_CRITICAL,
+};
+
+struct stack_info {
+	unsigned long low;
+	unsigned long high;
+	enum stack_type type;
+};
+
 extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
 extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			    int (*fn)(struct stackframe *, void *), void *data);
@@ -39,7 +54,8 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-static inline bool on_irq_stack(unsigned long sp)
+static inline bool on_irq_stack(unsigned long sp,
+				struct stack_info *info)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
 	unsigned long high = low + IRQ_STACK_SIZE;
@@ -47,46 +63,79 @@ static inline bool on_irq_stack(unsigned long sp)
 	if (!low)
 		return false;
 
-	return (low <= sp && sp < high);
+	if (sp < low || sp >= high)
+		return false;
+
+	if (info) {
+		info->low = low;
+		info->high = high;
+		info->type = STACK_TYPE_IRQ;
+	}
+
+	return true;
 }
 
-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
+				 struct stack_info *info)
 {
 	unsigned long low = (unsigned long)task_stack_page(tsk);
 	unsigned long high = low + THREAD_SIZE;
 
-	return (low <= sp && sp < high);
+	if (sp < low || sp >= high)
+		return false;
+
+	if (info) {
+		info->low = low;
+		info->high = high;
+		info->type = STACK_TYPE_TASK;
+	}
+
+	return true;
 }
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
-static inline bool on_overflow_stack(unsigned long sp)
+static inline bool on_overflow_stack(unsigned long sp,
+				     struct stack_info *info)
 {
 	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
 	unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-	return (low <= sp && sp < high);
+	if (sp < low || sp >= high)
+		return false;
+
+	if (info) {
+		info->low = low;
+		info->high = high;
+		info->type = STACK_TYPE_OVERFLOW;
+	}
+
+	return true;
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp) { return false; }
+static inline bool on_overflow_stack(unsigned long sp,
+			struct stack_info *info) { return false; }
 #endif
 
+
 /*
  * We can only safely access per-cpu stacks from current in a non-preemptible
  * context.
  */
-static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
+static inline bool on_accessible_stack(struct task_struct *tsk,
+				       unsigned long sp,
+				       struct stack_info *info)
 {
-	if (on_task_stack(tsk, sp))
+	if (on_task_stack(tsk, sp, info))
 		return true;
 	if (tsk != current || preemptible())
 		return false;
-	if (on_irq_stack(sp))
+	if (on_irq_stack(sp, info))
 		return true;
-	if (on_overflow_stack(sp))
+	if (on_overflow_stack(sp, info))
 		return true;
-	if (on_sdei_stack(sp))
+	if (on_sdei_stack(sp, info))
 		return true;
 
 	return false;
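
For context, here is a sketch of how a caller might consume the reworked
interface. This is illustrative only and not part of the patch:
report_sp() and stack_type_name() are hypothetical helpers, and the code
assumes the declarations above plus the usual kernel headers.

static const char *stack_type_name(enum stack_type type)
{
	switch (type) {
	case STACK_TYPE_TASK:		return "task";
	case STACK_TYPE_IRQ:		return "irq";
	case STACK_TYPE_OVERFLOW:	return "overflow";
	case STACK_TYPE_SDEI_NORMAL:	return "sdei_normal";
	case STACK_TYPE_SDEI_CRITICAL:	return "sdei_critical";
	default:			return "unknown";
	}
}

static void report_sp(struct task_struct *tsk, unsigned long sp)
{
	struct stack_info info = { .type = STACK_TYPE_UNKNOWN };

	/* info may also be NULL for callers that only want the bool. */
	if (!on_accessible_stack(tsk, sp, &info)) {
		pr_info("sp %lx is not on an accessible stack\n", sp);
		return;
	}

	pr_info("sp %lx is on the %s stack [%lx..%lx)\n",
		sp, stack_type_name(info.type), info.low, info.high);
}

The low/high bounds returned in info give an unwinder enough information
to check that a walk stays within one stack, without re-deriving the
stack limits at every step.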