@@ -395,27 +395,40 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
 	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
 }
 
-static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
-{
-	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
-
-	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
-}
+/*
+ * Update the MSRs managing speculation control, during context switch.
+ *
+ * tifp: Previous task's thread flags
+ * tifn: Next task's thread flags
+ */
+static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+						      unsigned long tifn)
+{
+	u64 msr = x86_spec_ctrl_base;
+	bool updmsr = false;
+
+	/* If TIF_SSBD is different, select the proper mitigation method */
+	if ((tifp ^ tifn) & _TIF_SSBD) {
+		if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+			amd_set_ssb_virt_state(tifn);
+		} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+			amd_set_core_ssb_state(tifn);
+		} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+			   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+			msr |= ssbd_tif_to_spec_ctrl(tifn);
+			updmsr = true;
+		}
+	}
 
-static __always_inline void __speculation_ctrl_update(unsigned long tifn)
-{
-	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
-		amd_set_ssb_virt_state(tifn);
-	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-		amd_set_core_ssb_state(tifn);
-	else
-		spec_ctrl_update_msr(tifn);
+	if (updmsr)
+		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
 void speculation_ctrl_update(unsigned long tif)
 {
+	/* Forced update. Make sure all relevant TIF flags are different */
 	preempt_disable();
-	__speculation_ctrl_update(tif);
+	__speculation_ctrl_update(~tif, tif);
 	preempt_enable();
 }
 
@@ -451,8 +464,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	if ((tifp ^ tifn) & _TIF_NOCPUID)
 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
-	if ((tifp ^ tifn) & _TIF_SSBD)
-		__speculation_ctrl_update(tifn);
+	__speculation_ctrl_update(tifp, tifn);
 }
 
 /*