@@ -960,37 +960,53 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 }
 
 #ifdef CONFIG_X86_64
-#ifdef CONFIG_IA32_EMULATION
+# ifdef CONFIG_IA32_EMULATION
 /* May not be __init: called during resume */
 static void syscall32_cpu_init(void)
 {
-	/* Load these always in case some future AMD CPU supports
-	   SYSENTER from compat mode too. */
+	/*
+	 * Always load these, in case some future 64-bit CPU supports
+	 * SYSENTER from compat mode too:
+	 */
 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 
 	wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
-#endif		/* CONFIG_IA32_EMULATION */
-
-#endif		/* CONFIG_X86_64 */
+# endif
+#endif
 
+/*
+ * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
+ * on 32-bit kernels:
+ */
 #ifdef CONFIG_X86_32
 void enable_sep_cpu(void)
 {
-	int cpu = get_cpu();
-	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *tss;
+	int cpu;
 
-	if (!boot_cpu_has(X86_FEATURE_SEP)) {
-		put_cpu();
-		return;
-	}
+	cpu = get_cpu();
+	tss = &per_cpu(cpu_tss, cpu);
+
+	if (!boot_cpu_has(X86_FEATURE_SEP))
+		goto out;
+
+	/*
+	 * The struct::SS1 and tss_struct::SP1 fields are not used by the hardware,
+	 * we cache the SYSENTER CS and ESP values there for easy access:
+	 */
 	tss->x86_tss.ss1 = __KERNEL_CS;
+	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
+
 	tss->x86_tss.sp1 = (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack);
-	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
 	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
-	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
+
+	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);
+
+out:
 	put_cpu();
 }
 #endif