@@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
 	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 }
 
-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-{
-	register_address_increment(ctxt, &ctxt->_eip, rel);
-}
-
 static u32 desc_limit_scaled(struct desc_struct *desc)
 {
 	u32 limit = get_desc_limit(desc);
@@ -569,6 +564,28 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 }
 
+static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+{
+	switch (ctxt->op_bytes) {
+	case 2:
+		ctxt->_eip = (u16)dst;
+		break;
+	case 4:
+		ctxt->_eip = (u32)dst;
+		break;
+	case 8:
+		ctxt->_eip = dst;
+		break;
+	default:
+		WARN(1, "unsupported eip assignment size\n");
+	}
+}
+
+static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+{
+	assign_eip_near(ctxt, ctxt->_eip + rel);
+}
+
 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 {
 	u16 selector;