@@ -3940,6 +3940,25 @@ static int check_fxsr(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+/*
+ * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
+ * and restore MXCSR.
+ */
+static size_t __fxstate_size(int nregs)
+{
+	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
+}
+
+static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
+{
+	bool cr4_osfxsr;
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return __fxstate_size(16);
+
+	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
+	return __fxstate_size(cr4_osfxsr ? 8 : 0);
+}
+
 /*
  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
  * 1) 16 bit mode
@@ -3961,7 +3980,6 @@ static int check_fxsr(struct x86_emulate_ctxt *ctxt)
 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 {
 	struct fxregs_state fx_state;
-	size_t size;
 	int rc;
 
 	rc = check_fxsr(ctxt);
@@ -3977,68 +3995,42 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
-		size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
-	else
-		size = offsetof(struct fxregs_state, xmm_space[0]);
-
-	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
-}
-
-static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
-			 struct fxregs_state *new)
-{
-	int rc = X86EMUL_CONTINUE;
-	struct fxregs_state old;
-
-	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
-	if (rc != X86EMUL_CONTINUE)
-		return rc;
-
-	/*
-	 * 64 bit host will restore XMM 8-15, which is not correct on non-64
-	 * bit guests. Load the current values in order to preserve 64 bit
-	 * XMMs after fxrstor.
-	 */
-#ifdef CONFIG_X86_64
-	/* XXX: accessing XMM 8-15 very awkwardly */
-	memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
-#endif
-
-	/*
-	 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
-	 * does save and restore MXCSR.
-	 */
-	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
-		memcpy(new->xmm_space, old.xmm_space, 8 * 16);
-
-	return rc;
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
+				   fxstate_size(ctxt));
 }
 
 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 {
 	struct fxregs_state fx_state;
 	int rc;
+	size_t size;
 
 	rc = check_fxsr(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
-	if (rc != X86EMUL_CONTINUE)
-		return rc;
+	ctxt->ops->get_fpu(ctxt);
 
-	if (fx_state.mxcsr >> 16)
-		return emulate_gp(ctxt, 0);
+	size = fxstate_size(ctxt);
+	if (size < __fxstate_size(16)) {
+		rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+		if (rc != X86EMUL_CONTINUE)
+			goto out;
+	}
 
-	ctxt->ops->get_fpu(ctxt);
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+	if (rc != X86EMUL_CONTINUE)
+		goto out;
 
-	if (ctxt->mode < X86EMUL_MODE_PROT64)
-		rc = fxrstor_fixup(ctxt, &fx_state);
+	if (fx_state.mxcsr >> 16) {
+		rc = emulate_gp(ctxt, 0);
+		goto out;
+	}
 
 	if (rc == X86EMUL_CONTINUE)
 		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
+out:
 	ctxt->ops->put_fpu(ctxt);
 
 	return rc;
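
For reference, below is a minimal userspace sketch of the size arithmetic the new fxstate_size() helper performs. The struct is a local stand-in, not the kernel's struct fxregs_state: it only mirrors the architectural FXSAVE image (legacy FPU header plus ST0-ST7), assuming the XMM area starts at byte offset 160. The printed sizes (160/288/416 bytes for no-OSFXSR, 32-bit with OSFXSR, and 64-bit mode) follow from that layout assumption.

#include <stddef.h>
#include <stdio.h>

/* Local stand-in for the FXSAVE memory image, not the kernel definition. */
struct fxregs_layout {
	unsigned char legacy_header[32]; /* FCW, FSW, FTW, FOP, IP, DP, MXCSR, MXCSR_MASK */
	unsigned int  st_space[32];      /* ST0-ST7 / MM0-MM7, 8 x 16 bytes */
	unsigned int  xmm_space[64];     /* XMM0-XMM15, 16 x 16 bytes */
};

/* Same arithmetic as __fxstate_size(): fixed prefix plus nregs XMM registers. */
static size_t fxstate_size_for(int nregs)
{
	return offsetof(struct fxregs_layout, xmm_space) + nregs * 16;
}

int main(void)
{
	printf("CR4.OSFXSR clear : %zu bytes\n", fxstate_size_for(0));  /* 160 */
	printf("32-bit, OSFXSR   : %zu bytes\n", fxstate_size_for(8));  /* 288 */
	printf("64-bit mode      : %zu bytes\n", fxstate_size_for(16)); /* 416 */
	return 0;
}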