@@ -279,22 +279,135 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
 	}
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+	struct ssb_state	*shared_state;
+	raw_spinlock_t		lock;
+	unsigned int		disable_state;
+	unsigned long		local_state;
+};
+
+#define LSTATE_SSB	0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
 {
-	u64 msr;
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	unsigned int this_cpu = smp_processor_id();
+	unsigned int cpu;
+
+	st->local_state = 0;
+
+	/*
+	 * Shared state setup happens once on the first bringup
+	 * of the CPU. It's not destroyed on CPU hotunplug.
+	 */
+	if (st->shared_state)
+		return;
+
+	raw_spin_lock_init(&st->lock);
 
-	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
-		msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+	/*
+	 * Go over HT siblings and check whether one of them has set up the
+	 * shared state pointer already.
+	 */
+	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+		if (cpu == this_cpu)
+			continue;
+
+		if (!per_cpu(ssb_state, cpu).shared_state)
+			continue;
+
+		/* Link it to the state of the sibling: */
+		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+		return;
+	}
+
+	/*
+	 * First HT sibling to come up on the core. Link shared state of
+	 * the first HT sibling to itself. The siblings on the same core
+	 * which come up later will see the shared state pointer and link
+	 * themselves to the state of this CPU.
+	 */
+	st->shared_state = st;
+}
+
+/*
+ * Logic is: First HT sibling enables SSBD for both siblings in the core
+ * and last sibling to disable it disables it for the whole core. This is how
+ * MSR_SPEC_CTRL works in "hardware":
+ *
+ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+ */
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+	struct ssb_state *st = this_cpu_ptr(&ssb_state);
+	u64 msr = x86_amd_ls_cfg_base;
+
+	if (!static_cpu_has(X86_FEATURE_ZEN)) {
+		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
 		wrmsrl(MSR_AMD64_LS_CFG, msr);
+		return;
+	}
+
+	if (tifn & _TIF_SSBD) {
+		/*
+		 * Since this can race with prctl(), block reentry on the
+		 * same CPU.
+		 */
+		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		msr |= x86_amd_ls_cfg_ssbd_mask;
+
+		raw_spin_lock(&st->shared_state->lock);
+		/* First sibling enables SSBD: */
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		st->shared_state->disable_state++;
+		raw_spin_unlock(&st->shared_state->lock);
 	} else {
-		msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
-		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
+			return;
+
+		raw_spin_lock(&st->shared_state->lock);
+		st->shared_state->disable_state--;
+		if (!st->shared_state->disable_state)
+			wrmsrl(MSR_AMD64_LS_CFG, msr);
+		raw_spin_unlock(&st->shared_state->lock);
 	}
 }
+#else
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+
+	wrmsrl(MSR_AMD64_LS_CFG, msr);
+}
+#endif
+
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
+{
+	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+	if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		amd_set_core_ssb_state(tifn);
+	else
+		intel_set_ssb_state(tifn);
+}
 
 void speculative_store_bypass_update(void)
 {
+	preempt_disable();
 	__speculative_store_bypass_update(current_thread_info()->flags);
+	preempt_enable();
 }
 
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,