@@ -1130,8 +1130,8 @@ DEFINE_PER_CPU_FIRST(union irq_stack_union,
 	irq_stack_union) __aligned(PAGE_SIZE) __visible;
 
 /*
- * The following four percpu variables are hot.  Align current_task to
- * cacheline size such that all four fall in the same cacheline.
+ * The following percpu variables are hot.  Align current_task to
+ * cacheline size such that they fall in the same cacheline.
  */
 DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 	&init_task;
@@ -1226,6 +1226,15 @@ DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
 DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
+/*
+ * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
+ * the top of the kernel stack.  Use an extra percpu variable to track the
+ * top of the kernel stack directly.
+ */
+DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
+	(unsigned long)&init_thread_union + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
+
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif