@@ -360,7 +360,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 			childregs->pstate |= PSR_UAO_BIT;
 
 		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);
 
 		p->thread.cpu_context.x19 = stack_start;
 		p->thread.cpu_context.x20 = stk_sz;
@@ -401,6 +401,32 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	struct pt_regs *regs = task_pt_regs(next);
+
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. idle task) so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/* If the mitigation is enabled, then we leave SSBS clear. */
+	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+	    test_tsk_thread_flag(next, TIF_SSBD))
+		return;
+
+	if (compat_user_mode(regs))
+		set_compat_ssbs_bit(regs);
+	else if (user_mode(regs))
+		set_ssbs_bit(regs);
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -429,6 +455,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	contextidr_thread_switch(next);
 	entry_task_switch(next);
 	uao_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
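
For reference, the set_ssbs_bit() and set_compat_ssbs_bit() helpers called above are defined elsewhere and are not visible in these hunks. A minimal sketch of what they plausibly look like, assuming each simply sets the SSBS bit in the saved pstate and that PSR_AA32_SSBS_BIT is the AArch32 counterpart of PSR_SSBS_BIT used for compat tasks:

/* Hypothetical sketch, not part of the hunks above. */
static inline void set_ssbs_bit(struct pt_regs *regs)
{
	/* Native (AArch64) task: same effect as the old open-coded assignment. */
	regs->pstate |= PSR_SSBS_BIT;
}

static inline void set_compat_ssbs_bit(struct pt_regs *regs)
{
	/* Assumed AArch32 (compat) form of the SSBS bit. */
	regs->pstate |= PSR_AA32_SSBS_BIT;
}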