@@ -207,7 +207,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
if (config_enabled(CONFIG_X86_32))
asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
else if (config_enabled(CONFIG_AS_FXSAVEQ))
- asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
else {
/* Using "rex64; fxsave %0" is broken because, if the memory
* operand uses any extended registers for addressing, a second
@@ -290,9 +290,11 @@ static inline int fpu_restore_checking(struct fpu *fpu)
static inline int restore_fpu_checking(struct task_struct *tsk)
{
- /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
- is pending. Clear the x87 state here by setting it to fixed
- values. "m" is a random variable that should be in L1 */
+ /*
+ * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+ * pending. Clear the x87 state here by setting it to fixed values.
+ * "m" is a random variable that should be in L1.
+ */
if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
asm volatile(
"fnclex\n\t"
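
For reference, the functional change in the first hunk is only the switch from a positional asm operand ("%0") to a named one ("%[fx]"), matching the CONFIG_X86_32 branch above it. Below is a minimal standalone sketch of the two equivalent GCC extended-asm forms; "fxbuf" is a hypothetical 512-byte, 16-byte-aligned buffer standing in for fpu->state->fxsave, not the kernel's actual structure:

/*
 * Sketch only, not kernel code: positional vs. named memory operands in
 * GCC extended asm.  Both emit the same fxsaveq instruction; the named
 * form just makes the operand binding explicit and harder to get wrong
 * when operands are added or reordered.
 */
static unsigned char fxbuf[512] __attribute__((aligned(16)));

static inline void fxsave_positional(void)
{
	asm volatile("fxsaveq %0" : "=m" (fxbuf));
}

static inline void fxsave_named(void)
{
	asm volatile("fxsaveq %[fx]" : [fx] "=m" (fxbuf));
}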