@@ -37,6 +37,7 @@
 #include <asm/vm86.h>
 #include <asm/switch_to.h>
 #include <asm/desc.h>
+#include <asm/prctl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -124,11 +125,6 @@ void flush_thread(void)
 	fpu__clear(&tsk->thread.fpu);
 }
 
-static void hard_disable_TSC(void)
-{
-	cr4_set_bits(X86_CR4_TSD);
-}
-
 void disable_TSC(void)
 {
 	preempt_disable();
@@ -137,15 +133,10 @@ void disable_TSC(void)
 		 * Must flip the CPU state synchronously with
 		 * TIF_NOTSC in the current running context.
 		 */
-		hard_disable_TSC();
+		cr4_set_bits(X86_CR4_TSD);
 	preempt_enable();
 }
 
-static void hard_enable_TSC(void)
-{
-	cr4_clear_bits(X86_CR4_TSD);
-}
-
 static void enable_TSC(void)
 {
 	preempt_disable();
@@ -154,7 +145,7 @@ static void enable_TSC(void)
 		 * Must flip the CPU state synchronously with
 		 * TIF_NOTSC in the current running context.
 		 */
-		hard_enable_TSC();
+		cr4_clear_bits(X86_CR4_TSD);
 	preempt_enable();
 }
 
@@ -182,54 +173,129 @@ int set_tsc_mode(unsigned int val)
 	return 0;
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss)
-{
-	struct thread_struct *prev, *next;
-
-	prev = &prev_p->thread;
-	next = &next_p->thread;
+DEFINE_PER_CPU(u64, msr_misc_features_shadow);
 
-	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
-	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
-		unsigned long debugctl = get_debugctlmsr();
+static void set_cpuid_faulting(bool on)
+{
+	u64 msrval;
 
-		debugctl &= ~DEBUGCTLMSR_BTF;
-		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
-			debugctl |= DEBUGCTLMSR_BTF;
+	msrval = this_cpu_read(msr_misc_features_shadow);
+	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
+	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
+	this_cpu_write(msr_misc_features_shadow, msrval);
+	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
+}
 
-		update_debugctlmsr(debugctl);
+static void disable_cpuid(void)
+{
+	preempt_disable();
+	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOCPUID in the current running context.
+		 */
+		set_cpuid_faulting(true);
 	}
+	preempt_enable();
+}
 
-	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-		/* prev and next are different */
-		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-			hard_disable_TSC();
-		else
-			hard_enable_TSC();
+static void enable_cpuid(void)
+{
+	preempt_disable();
+	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
+		/*
+		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
+		 */
+		set_cpuid_faulting(false);
 	}
+	preempt_enable();
+}
+
+static int get_cpuid_mode(void)
+{
+	return !test_thread_flag(TIF_NOCPUID);
+}
+
+static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
+{
+	if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
+		return -ENODEV;
+
+	if (cpuid_enabled)
+		enable_cpuid();
+	else
+		disable_cpuid();
+
+	return 0;
+}
+
+/*
+ * Called immediately after a successful exec.
+ */
+void arch_setup_new_exec(void)
+{
+	/* If cpuid was previously disabled for this task, re-enable it. */
+	if (test_thread_flag(TIF_NOCPUID))
+		enable_cpuid();
+}
 
-	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+static inline void switch_to_bitmap(struct tss_struct *tss,
+				    struct thread_struct *prev,
+				    struct thread_struct *next,
+				    unsigned long tifp, unsigned long tifn)
+{
+	if (tifn & _TIF_IO_BITMAP) {
 		/*
 		 * Copy the relevant range of the IO bitmap.
 		 * Normally this is 128 bytes or less:
 		 */
 		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
 		       max(prev->io_bitmap_max, next->io_bitmap_max));
-
 		/*
 		 * Make sure that the TSS limit is correct for the CPU
 		 * to notice the IO bitmap.
 		 */
 		refresh_tss_limit();
-	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+	} else if (tifp & _TIF_IO_BITMAP) {
 		/*
 		 * Clear any possible leftover bits:
 		 */
 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+		      struct tss_struct *tss)
+{
+	struct thread_struct *prev, *next;
+	unsigned long tifp, tifn;
+
+	prev = &prev_p->thread;
+	next = &next_p->thread;
+
+	tifn = READ_ONCE(task_thread_info(next_p)->flags);
+	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+	switch_to_bitmap(tss, prev, next, tifp, tifn);
+
 	propagate_user_return_notify(prev_p, next_p);
+
+	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
+	    arch_has_block_step()) {
+		unsigned long debugctl, msk;
+
+		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		msk = tifn & _TIF_BLOCKSTEP;
+		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
+		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+	}
+
+	if ((tifp ^ tifn) & _TIF_NOTSC)
+		cr4_toggle_bits(X86_CR4_TSD);
+
+	if ((tifp ^ tifn) & _TIF_NOCPUID)
+		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 }
 
 /*
@@ -550,3 +616,16 @@ out:
 	put_task_stack(p);
 	return ret;
 }
+
+long do_arch_prctl_common(struct task_struct *task, int option,
+			  unsigned long cpuid_enabled)
+{
+	switch (option) {
+	case ARCH_GET_CPUID:
+		return get_cpuid_mode();
+	case ARCH_SET_CPUID:
+		return set_cpuid_mode(task, cpuid_enabled);
+	}
+
+	return -EINVAL;
+}
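
Note (not part of the patch): a minimal userspace sketch of how the new do_arch_prctl_common() options are expected to be exercised. It assumes the ARCH_GET_CPUID/ARCH_SET_CPUID constants from asm/prctl.h added by this series, glibc's syscall(2) wrapper with SYS_arch_prctl on x86-64, and a CPU with X86_FEATURE_CPUID_FAULT; on kernels or CPUs without the feature, ARCH_SET_CPUID fails with ENODEV.

/* cpuid_fault_demo.c: toggle CPUID faulting for the current task. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_GET_CPUID, ARCH_SET_CPUID */

int main(void)
{
	/* ARCH_GET_CPUID returns 1 while CPUID is allowed (TIF_NOCPUID clear). */
	long enabled = syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);
	printf("cpuid currently enabled: %ld\n", enabled);

	/* ARCH_SET_CPUID with 0 enables CPUID faulting for this task. */
	if (syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0) < 0) {
		perror("ARCH_SET_CPUID");
		return 1;
	}

	/*
	 * From here on, executing CPUID in user mode raises SIGSEGV; the
	 * setting is dropped again on exec via arch_setup_new_exec().
	 */
	return 0;
}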