@@ -102,6 +102,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+static void store_regs(struct kvm_vcpu *vcpu);
+static int sync_regs(struct kvm_vcpu *vcpu);
 
 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -2829,6 +2831,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_GET_MSR_FEATURES:
 		r = 1;
 		break;
+	case KVM_CAP_SYNC_REGS:
+		r = KVM_SYNC_X86_VALID_FIELDS;
+		break;
 	case KVM_CAP_ADJUST_CLOCK:
 		r = KVM_CLOCK_TSC_STABLE;
 		break;
@@ -7510,7 +7515,6 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
@@ -7536,6 +7540,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		goto out;
 	}
 
+	if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
+		r = -EINVAL;
+		goto out;
+	}
+
+	if (vcpu->run->kvm_dirty_regs) {
+		r = sync_regs(vcpu);
+		if (r != 0)
+			goto out;
+	}
+
 	/* re-sync apic's tpr */
 	if (!lapic_in_kernel(vcpu)) {
 		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
@@ -7560,6 +7575,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 out:
 	kvm_put_guest_fpu(vcpu);
+	if (vcpu->run->kvm_valid_regs)
+		store_regs(vcpu);
 	post_kvm_run_save(vcpu);
 	kvm_sigset_deactivate(vcpu);
 
@@ -7567,10 +7584,8 @@ out:
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	vcpu_load(vcpu);
-
 	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
 		/*
 		 * We are here if userspace calls get_regs() in the middle of
@@ -7603,15 +7618,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 	regs->rip = kvm_rip_read(vcpu);
 	regs->rflags = kvm_get_rflags(vcpu);
+}
 
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	vcpu_load(vcpu);
+	__get_regs(vcpu, regs);
 	vcpu_put(vcpu);
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	vcpu_load(vcpu);
-
 	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
 	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 
@@ -7640,7 +7658,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	vcpu->arch.exception.pending = false;
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
 
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	vcpu_load(vcpu);
+	__set_regs(vcpu, regs);
 	vcpu_put(vcpu);
 	return 0;
 }
@@ -7655,13 +7678,10 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
 	struct desc_ptr dt;
 
-	vcpu_load(vcpu);
-
 	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
 	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
 	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
@@ -7692,7 +7712,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
 		set_bit(vcpu->arch.interrupt.nr,
 			(unsigned long *)sregs->interrupt_bitmap);
+}
 
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	vcpu_load(vcpu);
+	__get_sregs(vcpu, sregs);
 	vcpu_put(vcpu);
 	return 0;
 }
@@ -7787,8 +7813,7 @@ int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
 	struct msr_data apic_base_msr;
 	int mmu_reset_needed = 0;
@@ -7796,8 +7821,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	struct desc_ptr dt;
 	int ret = -EINVAL;
 
-	vcpu_load(vcpu);
-
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
 			(sregs->cr4 & X86_CR4_OSXSAVE))
 		goto out;
@@ -7876,6 +7899,16 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 	ret = 0;
 out:
+	return ret;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	int ret;
+
+	vcpu_load(vcpu);
+	ret = __set_sregs(vcpu, sregs);
 	vcpu_put(vcpu);
 	return ret;
 }
@@ -8002,6 +8035,45 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	return 0;
 }
 
+static void store_regs(struct kvm_vcpu *vcpu)
+{
+	BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
+
+	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
+		__get_regs(vcpu, &vcpu->run->s.regs.regs);
+
+	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
+		__get_sregs(vcpu, &vcpu->run->s.regs.sregs);
+
+	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
+		kvm_vcpu_ioctl_x86_get_vcpu_events(
+				vcpu, &vcpu->run->s.regs.events);
+}
+
+static int sync_regs(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)
+		return -EINVAL;
+
+	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
+		__set_regs(vcpu, &vcpu->run->s.regs.regs);
+		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
+	}
+	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
+		if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
+			return -EINVAL;
+		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
+	}
+	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
+		if (kvm_vcpu_ioctl_x86_set_vcpu_events(
+				vcpu, &vcpu->run->s.regs.events))
+			return -EINVAL;
+		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
+	}
+
+	return 0;
+}
+
 static void fx_init(struct kvm_vcpu *vcpu)
 {
 	fpstate_init(&vcpu->arch.guest_fpu.state);
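
For reference, a minimal userspace sketch of how the new capability is meant to be consumed. It is hypothetical (the helper name run_with_sync_regs and the fd variables are illustrative, and error handling is trimmed), but the ioctls, flags, and kvm_run fields are the ones this patch wires up:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative helper, not part of the patch: "run" points at the vCPU's
 * mmap()ed struct kvm_run, "vcpu_fd" comes from KVM_CREATE_VCPU, and
 * "kvm_fd" is the open /dev/kvm fd. */
static int run_with_sync_regs(int kvm_fd, int vcpu_fd, struct kvm_run *run)
{
	/* Which fields this kernel can mirror in run->s.regs. */
	int valid = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS);

	if (!(valid & KVM_SYNC_X86_REGS))
		return -1;	/* fall back to the KVM_GET/SET_REGS ioctls */

	/* Ask KVM_RUN to store GPRs and sregs into the run page on exit. */
	run->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	/* State is now readable in the mmap()ed page without another ioctl. */
	run->s.regs.regs.rax = 0;			/* tweak guest state... */
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;	/* ...and push it back */
	return ioctl(vcpu_fd, KVM_RUN, 0);
}

The shape of the patch above supports exactly this flow: sync_regs() runs before the TPR resync and guest entry, a rejected __set_sregs() surfaces as -EINVAL from KVM_RUN, and store_regs() sits on the common "out:" path so userspace sees up-to-date copies of every field it marked valid on each return.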