@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+			       struct segmented_address addr,
+			       void *data,
+			       unsigned int size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
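The new helper mirrors segmented_read_std() in the hunk above: translate the segmented address with linearize(), then hand the buffer to the ->write_std callback instead of the ordinary emulated-write path. As a rough illustration of the logical-to-linear step, here is a minimal, self-contained sketch under simplified assumptions (a flat expand-up segment, size >= 1, no canonical-address or permission checks); fake_segment and fake_linearize are invented names for this example, not emulator code.

/*
 * Simplified model of logical-to-linear translation: check the access
 * against the segment limit, then add the segment base to the offset.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_segment {
	uint64_t base;
	uint32_t limit;		/* highest valid offset within the segment */
};

/* Returns 0 on success, -1 if the access would run past the limit. */
static int fake_linearize(const struct fake_segment *seg, uint64_t offset,
			  unsigned int size, uint64_t *linear)
{
	if (offset > seg->limit || size - 1 > seg->limit - offset)
		return -1;
	*linear = seg->base + offset;
	return 0;
}

int main(void)
{
	struct fake_segment es = { .base = 0x10000, .limit = 0xffff };
	uint64_t linear;

	if (fake_linearize(&es, 0x1234, 8, &linear) == 0)
		printf("linear address: 0x%llx\n", (unsigned long long)linear);
	return 0;
}

Running it prints 0x11234, i.e. base plus offset once the limit check passes; the real linearize() additionally handles expand-down segments, 64-bit canonical checks and access rights.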
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				    &ctxt->exception);
 }
 
-/* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				     u16 selector, int seg, u8 cpl,
 				     enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 	rpl = selector & 3;
 
-	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
-	if ((seg == VCPU_SREG_CS
-	     || (seg == VCPU_SREG_SS
-		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
-	     || seg == VCPU_SREG_TR)
-	    && null_selector)
-		goto exception;
-
 	/* TR should be in GDT only */
 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
 		goto exception;
 
-	if (null_selector) /* for NULL selector skip all following checks */
+	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
+	if (null_selector) {
+		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+			goto exception;
+
+		if (seg == VCPU_SREG_SS) {
+			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+				goto exception;
+
+			/*
+			 * ctxt->ops->set_segment expects the CPL to be in
+			 * SS.DPL, so fake an expand-up 32-bit data segment.
+			 */
+			seg_desc.type = 3;
+			seg_desc.p = 1;
+			seg_desc.s = 1;
+			seg_desc.dpl = cpl;
+			seg_desc.d = 1;
+			seg_desc.g = 1;
+		}
+
+		/* Skip all following checks */
 		goto load;
+	}
 
 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
 	if (ret != X86EMUL_CONTINUE)
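The faked descriptor above matters because, as the comment says, ctxt->ops->set_segment expects the CPL to be carried in SS.DPL, so even a null SS has to be loaded with sensible attribute bits. For illustration only, the sketch below shows how those flag values (type=3, s=1, dpl=cpl, p=1, d=1, g=1) would pack into the attribute bytes of a legacy 8-byte segment descriptor; pack_access_byte() and pack_flags() are invented helpers, and the real struct desc_struct uses bitfields rather than manual packing.

#include <stdint.h>
#include <stdio.h>

static uint8_t pack_access_byte(uint8_t type, uint8_t s, uint8_t dpl, uint8_t p)
{
	/* byte 5 of the descriptor: P | DPL | S | TYPE */
	return (uint8_t)((p << 7) | (dpl << 5) | (s << 4) | (type & 0xf));
}

static uint8_t pack_flags(uint8_t avl, uint8_t l, uint8_t d, uint8_t g)
{
	/* high nibble of byte 6: G | D/B | L | AVL */
	return (uint8_t)((g << 3) | (d << 2) | (l << 1) | avl);
}

int main(void)
{
	uint8_t cpl = 0;	/* the CPL being copied into SS.DPL */

	printf("access byte:  0x%02x\n", pack_access_byte(3, 1, cpl, 1));
	printf("flags nibble: 0x%x\n", pack_flags(0, 0, 1, 1));
	return 0;
}

With cpl=0 this prints an access byte of 0x93, i.e. a present, DPL 0, read/write accessed data segment, together with the G and D/B bits set.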
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   u16 selector, int seg)
 {
 	u8 cpl = ctxt->ops->cpl(ctxt);
+
+	/*
+	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+	 * they can load it at CPL<3 (Intel's manual says only LSS can,
+	 * but it's wrong).
+	 *
+	 * However, the Intel manual says that putting IST=1/DPL=3 in
+	 * an interrupt gate will result in SS=3 (the AMD manual instead
+	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
+	 * and only forbid it here.
+	 */
+	if (seg == VCPU_SREG_SS && selector == 3 &&
+	    ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
 					 X86_TRANSFER_NONE, NULL);
 }
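The selector == 3 test above targets the null selector with RPL=3: any selector whose index and table-indicator bits are zero is null, so the values 0 through 3 are all null and differ only in their requested privilege level. A short illustrative sketch of that decomposition (decode_selector() is a made-up helper, not emulator code):

#include <stdio.h>

struct selector_bits {
	unsigned int index;	/* bits 15:3, descriptor table index */
	unsigned int ti;	/* bit 2, 0 = GDT, 1 = LDT */
	unsigned int rpl;	/* bits 1:0, requested privilege level */
};

static struct selector_bits decode_selector(unsigned int sel)
{
	struct selector_bits s = {
		.index = sel >> 3,
		.ti = (sel >> 2) & 1,
		.rpl = sel & 3,
	};
	return s;
}

int main(void)
{
	/* 0..3 all have index=0 and ti=0, i.e. they are all null selectors */
	for (unsigned int sel = 0; sel <= 3; sel++) {
		struct selector_bits s = decode_selector(sel);
		printf("sel=%u: index=%u ti=%u rpl=%u -> null selector\n",
		       sel, s.index, s.ti, s.rpl);
	}
	return 0;
}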
@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
 	}
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
-	return segmented_write(ctxt, ctxt->dst.addr.mem,
-			       &desc_ptr, 2 + ctxt->op_bytes);
+	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+				   &desc_ptr, 2 + ctxt->op_bytes);
 }
 
 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
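For context on the 2 + ctxt->op_bytes length used above: SGDT and SIDT store a pseudo-descriptor made of a 16-bit limit followed by the table base, which is 4 bytes outside long mode and 8 bytes in it. The struct below is a stand-in used only for this illustration; in the real code the kernel's struct desc_ptr plays that role.

#include <stdint.h>
#include <stdio.h>

/* Long-mode layout of the SGDT/SIDT memory operand: 2-byte limit, 8-byte base. */
struct pseudo_descriptor {
	uint16_t limit;
	uint64_t base;
} __attribute__((packed));

int main(void)
{
	unsigned int op_bytes_legacy = 4;	/* 32-bit operand size */
	unsigned int op_bytes_long = 8;		/* 64-bit mode */

	printf("legacy store:    %u bytes\n", 2 + op_bytes_legacy);	/* 6 */
	printf("long-mode store: %u bytes\n", 2 + op_bytes_long);	/* 10 */
	printf("full struct:     %zu bytes\n", sizeof(struct pseudo_descriptor));
	return 0;
}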
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 	else
 		size = offsetof(struct fxregs_state, xmm_space[0]);
 
-	return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
 }
 
 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
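On the size computed just above: the architectural FXSAVE image is 512 bytes, with the XMM register file starting at byte offset 160, so offsetof(struct fxregs_state, xmm_space[0]) truncates the store to the legacy x87 portion when the XMM state is not written. The sketch below reproduces only the offsets relevant here with a simplified stand-in struct, not the kernel's fxregs_state definition.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fxsave_image {
	uint8_t legacy_header[32];	/* FCW, FSW, FTW, FOP, FIP, FDP, MXCSR, MXCSR_MASK */
	uint8_t st_space[128];		/* 8 x87/MMX registers, 16 bytes each */
	uint8_t xmm_space[256];		/* 16 XMM registers, 16 bytes each */
	uint8_t reserved[96];
};

int main(void)
{
	static_assert(offsetof(struct fxsave_image, xmm_space) == 160,
		      "XMM area starts at byte 160");
	static_assert(sizeof(struct fxsave_image) == 512,
		      "FXSAVE image is 512 bytes");

	printf("store without XMM state: %zu bytes\n",
	       offsetof(struct fxsave_image, xmm_space));
	printf("full image:              %zu bytes\n",
	       sizeof(struct fxsave_image));
	return 0;
}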
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 