@@ -57,27 +57,9 @@ static bool kernel_fpu_disabled(void)
 	return this_cpu_read(in_kernel_fpu);
 }
 
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
 static bool interrupted_kernel_fpu_idle(void)
 {
-	if (kernel_fpu_disabled())
-		return false;
-
-	if (use_eager_fpu())
-		return true;
-
-	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+	return !kernel_fpu_disabled();
 }
 
 /*
@@ -125,7 +107,6 @@ void __kernel_fpu_begin(void)
 		copy_fpregs_to_fpstate(fpu);
 	} else {
 		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
-		__fpregs_activate_hw();
 	}
 }
 EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -136,8 +117,6 @@ void __kernel_fpu_end(void)
 
 	if (fpu->fpregs_active)
 		copy_kernel_to_fpregs(&fpu->state);
-	else
-		__fpregs_deactivate_hw();
 
 	kernel_fpu_enable();
 }
@@ -199,10 +178,7 @@ void fpu__save(struct fpu *fpu)
 	trace_x86_fpu_before_save(fpu);
 	if (fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
-			if (use_eager_fpu())
-				copy_kernel_to_fpregs(&fpu->state);
-			else
-				fpregs_deactivate(fpu);
+			copy_kernel_to_fpregs(&fpu->state);
 		}
 	}
 	trace_x86_fpu_after_save(fpu);
@@ -259,8 +235,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
|
|
* Don't let 'init optimized' areas of the XSAVE area
|
|
* Don't let 'init optimized' areas of the XSAVE area
|
|
* leak into the child task:
|
|
* leak into the child task:
|
|
*/
|
|
*/
|
|
- if (use_eager_fpu())
|
|
|
|
- memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
|
|
|
|
|
|
+ memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
|
|
|
|
|
|
/*
|
|
/*
|
|
* Save current FPU registers directly into the child
|
|
* Save current FPU registers directly into the child
|
|
@@ -282,10 +257,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 		memcpy(&src_fpu->state, &dst_fpu->state,
 		       fpu_kernel_xstate_size);
 
-		if (use_eager_fpu())
-			copy_kernel_to_fpregs(&src_fpu->state);
-		else
-			fpregs_deactivate(src_fpu);
+		copy_kernel_to_fpregs(&src_fpu->state);
 	}
 	preempt_enable();
 
@@ -517,7 +489,7 @@ void fpu__clear(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
-	if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
+	if (!static_cpu_has(X86_FEATURE_FPU)) {
 		/* FPU state will be reallocated lazily at the first use. */
 		fpu__drop(fpu);
 	} else {