@@ -114,6 +114,10 @@ void __kernel_fpu_begin(void)
         kernel_fpu_disable();
 
         if (fpu->fpregs_active) {
+                /*
+                 * Ignore return value -- we don't care if reg state
+                 * is clobbered.
+                 */
                 copy_fpregs_to_fpstate(fpu);
         } else {
                 this_cpu_write(fpu_fpregs_owner_ctx, NULL);
@@ -189,8 +193,12 @@ void fpu__save(struct fpu *fpu)
 
         preempt_disable();
         if (fpu->fpregs_active) {
-                if (!copy_fpregs_to_fpstate(fpu))
-                        fpregs_deactivate(fpu);
+                if (!copy_fpregs_to_fpstate(fpu)) {
+                        if (use_eager_fpu())
+                                copy_kernel_to_fpregs(&fpu->state);
+                        else
+                                fpregs_deactivate(fpu);
+                }
         }
         preempt_enable();
 }
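
A note on the fpu__save() hunk above, as I read it: copy_fpregs_to_fpstate() returns 0 when it had to fall back to FNSAVE, and FNSAVE clobbers the FPU registers as a side effect of saving them. An eager-FPU kernel expects the registers to keep mirroring current's fpstate, so it reloads them with copy_kernel_to_fpregs(); a lazy-FPU kernel can simply deactivate them and let the next #NM fault restore the state. Below is a minimal standalone sketch of that decision, assuming only the behaviour described in the patch; after_save(), regs_survived and eager are invented names for illustration, not kernel symbols.

/*
 * Toy model of the branch added to fpu__save().
 * "regs_survived" stands in for the return value of
 * copy_fpregs_to_fpstate(), "eager" for use_eager_fpu().
 * Illustration only -- not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum fixup { FIXUP_NONE, FIXUP_RELOAD, FIXUP_DEACTIVATE };

static enum fixup after_save(bool regs_survived, bool eager)
{
        if (regs_survived)
                return FIXUP_NONE;       /* FXSAVE/XSAVE kept fpregs intact */
        if (eager)
                return FIXUP_RELOAD;     /* eager mode: fpregs must stay valid */
        return FIXUP_DEACTIVATE;         /* lazy mode: next #NM fault reloads */
}

int main(void)
{
        printf("FNSAVE, eager FPU -> %d (reload)\n",     after_save(false, true));
        printf("FNSAVE, lazy FPU  -> %d (deactivate)\n", after_save(false, false));
        printf("FXSAVE/XSAVE      -> %d (no fixup)\n",   after_save(true, true));
        return 0;
}
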
@@ -223,14 +231,15 @@ void fpstate_init(union fpregs_state *state)
 }
 EXPORT_SYMBOL_GPL(fpstate_init);
 
-/*
- * Copy the current task's FPU state to a new task's FPU context.
- *
- * In both the 'eager' and the 'lazy' case we save hardware registers
- * directly to the destination buffer.
- */
-static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
+        dst_fpu->counter = 0;
+        dst_fpu->fpregs_active = 0;
+        dst_fpu->last_cpu = -1;
+
+        if (!src_fpu->fpstate_active || !cpu_has_fpu)
+                return 0;
+
         WARN_ON_FPU(src_fpu != &current->thread.fpu);
 
         /*
@@ -243,10 +252,9 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
         /*
          * Save current FPU registers directly into the child
          * FPU context, without any memory-to-memory copying.
-         *
-         * If the FPU context got destroyed in the process (FNSAVE
-         * done on old CPUs) then copy it back into the source
-         * context and mark the current task for lazy restore.
+         * In lazy mode, if the FPU context isn't loaded into
+         * fpregs, CR0.TS will be set and do_device_not_available
+         * will load the FPU context.
          *
          * We have to do all this with preemption disabled,
          * mostly because of the FNSAVE case, because in that
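
The rewritten comment above relies on the lazy-restore protocol: while a task's FPU context is not loaded into fpregs, CR0.TS stays set, the task's first FPU instruction raises #NM, and do_device_not_available() loads the saved context. A self-contained sketch of that protocol follows, under those assumptions; ts_set, ctx_loaded, toy_deactivate() and toy_fpu_insn() are invented stand-ins for CR0.TS, the per-task load state, fpregs_deactivate() and the trap path, not kernel code.

/*
 * Sketch of the lazy FPU restore protocol referenced above.
 * ts_set models CR0.TS, toy_fpu_insn() models the first FPU
 * instruction after the context was dropped, and the branch
 * inside it models do_device_not_available().  All names are
 * invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

static bool ts_set;             /* stand-in for CR0.TS */
static bool ctx_loaded = true;  /* are this task's FPU regs in hardware? */

static void toy_deactivate(void)
{
        ctx_loaded = false;
        ts_set = true;           /* next FPU instruction will trap with #NM */
}

static void toy_fpu_insn(void)
{
        if (ts_set) {            /* #NM path: load context, clear TS */
                printf("#NM: loading saved FPU context\n");
                ctx_loaded = true;
                ts_set = false;
        }
        printf("FPU instruction runs (ctx_loaded=%d)\n", ctx_loaded);
}

int main(void)
{
        toy_deactivate();        /* e.g. after fpregs_deactivate() in lazy mode */
        toy_fpu_insn();          /* traps, restores, then executes */
        toy_fpu_insn();          /* executes directly */
        return 0;
}
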
@@ -259,19 +267,13 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
         preempt_disable();
         if (!copy_fpregs_to_fpstate(dst_fpu)) {
                 memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
-                fpregs_deactivate(src_fpu);
+
+                if (use_eager_fpu())
+                        copy_kernel_to_fpregs(&src_fpu->state);
+                else
+                        fpregs_deactivate(src_fpu);
         }
         preempt_enable();
-}
-
-int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
-{
-        dst_fpu->counter = 0;
-        dst_fpu->fpregs_active = 0;
-        dst_fpu->last_cpu = -1;
-
-        if (src_fpu->fpstate_active && cpu_has_fpu)
-                fpu_copy(dst_fpu, src_fpu);
 
         return 0;
 }
@@ -425,7 +427,7 @@ void fpu__clear(struct fpu *fpu)
 {
         WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
-        if (!use_eager_fpu()) {
+        if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
                 /* FPU state will be reallocated lazily at the first use. */
                 fpu__drop(fpu);
         } else {