spec-ctrl.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECCTRL_H_
#define _ASM_X86_SPECCTRL_H_

#include <linux/thread_info.h>
#include <asm/nospec-branch.h>

/*
 * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
 * the guest has, while on VMEXIT we restore the host view. This
 * would be easier if SPEC_CTRL were architecturally maskable or
 * shadowable for guests but this is not (currently) the case.
 * Takes the guest view of SPEC_CTRL MSR as a parameter and also
 * the guest's version of VIRT_SPEC_CTRL, if emulated.
 */
extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
				    u64 guest_virt_spec_ctrl);
extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
				       u64 guest_virt_spec_ctrl);

/* AMD specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
extern u64 x86_amd_ls_cfg_ssbd_mask;

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

#ifdef CONFIG_SMP
extern void speculative_store_bypass_ht_init(void);
#else
static inline void speculative_store_bypass_ht_init(void) { }
#endif

extern void speculative_store_bypass_update(void);

#endif
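
For context, ssbd_tif_to_spec_ctrl() turns the per-task TIF_SSBD thread-info flag into the corresponding SSBD bit of an IA32_SPEC_CTRL MSR value with a single mask-and-shift. Below is a minimal user-space sketch of that mapping, separate from the header above; the TIF_SSBD and SPEC_CTRL_SSBD_SHIFT values it uses are illustrative assumptions (the real definitions come from <asm/thread_info.h> and <asm/nospec-branch.h>, not from this file).

/*
 * Standalone sketch (not kernel code) of the bit mapping performed by
 * ssbd_tif_to_spec_ctrl(). The constant values below are assumptions
 * chosen for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define TIF_SSBD		5	/* assumed bit position of the thread flag */
#define _TIF_SSBD		(1UL << TIF_SSBD)
#define SPEC_CTRL_SSBD_SHIFT	2	/* assumed bit position of SSBD in SPEC_CTRL */
#define SPEC_CTRL_SSBD		(1UL << SPEC_CTRL_SSBD_SHIFT)

static inline uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
{
	/*
	 * Same expression as the header: isolate the flag bit, then shift
	 * it right so it lands on the SPEC_CTRL SSBD bit. This requires
	 * TIF_SSBD >= SPEC_CTRL_SSBD_SHIFT, which the kernel enforces at
	 * compile time with BUILD_BUG_ON().
	 */
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
	uint64_t flags_with_ssbd = _TIF_SSBD | 0x3;	/* unrelated flag bits are ignored */
	uint64_t flags_without_ssbd = 0x3;

	printf("with TIF_SSBD set:   SPEC_CTRL bits = 0x%llx (expect 0x%llx)\n",
	       (unsigned long long)ssbd_tif_to_spec_ctrl(flags_with_ssbd),
	       (unsigned long long)SPEC_CTRL_SSBD);
	printf("with TIF_SSBD clear: SPEC_CTRL bits = 0x%llx (expect 0x0)\n",
	       (unsigned long long)ssbd_tif_to_spec_ctrl(flags_without_ssbd));
	return 0;
}

The shift-based form means the per-task flag can be merged into the MSR value without a branch; ssbd_tif_to_amd_ls_cfg() serves the same purpose for AMD parts that expose SSBD through the LS_CFG MSR instead, using the x86_amd_ls_cfg_ssbd_mask declared above.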