@@ -179,6 +179,28 @@ int kvm_arch_hardware_enable(void)
 
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 			      unsigned long end);
+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+	u8 delta_idx = 0;
+
+	/*
+	 * The TOD jumps by delta, we have to compensate this by adding
+	 * -delta to the epoch.
+	 */
+	delta = -delta;
+
+	/* sign-extension - we're adding to signed values below */
+	if ((s64)delta < 0)
+		delta_idx = -1;
+
+	scb->epoch += delta;
+	if (scb->ecd & ECD_MEF) {
+		scb->epdx += delta_idx;
+		if (scb->epoch < delta)
+			scb->epdx += 1;
+	}
+}
+
 /*
  * This callback is executed during stop_machine(). All CPUs are therefore
  * temporarily stopped. In order not to change guest behavior, we have to
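
The heart of this hunk is the split 72-bit epoch: the SIE block keeps the low 64 bits in epoch and, when the multiple-epoch facility is active (ECD_MEF), the high 8 bits in epdx, so the helper is just multi-word two's-complement addition. A minimal standalone sketch of the same carry logic (userspace C, hypothetical names, not kernel code):

/*
 * Sketch of the carry handling in kvm_clock_sync_scb: a 72-bit signed
 * epoch held as a u64 low word plus a u8 high word, with a signed
 * 64-bit delta added via explicit sign extension and carry detection.
 */
#include <assert.h>
#include <stdint.h>

struct epoch128 {
	uint64_t epoch;	/* low 64 bits, as in the SIE block */
	uint8_t epdx;	/* high 8 bits (multiple-epoch extension) */
};

static void epoch_add(struct epoch128 *e, uint64_t delta)
{
	/* sign-extend delta into the high word */
	uint8_t delta_idx = ((int64_t)delta < 0) ? 0xff : 0;

	e->epoch += delta;
	e->epdx += delta_idx;
	/* unsigned wraparound of the low word carries into epdx */
	if (e->epoch < delta)
		e->epdx += 1;
}

int main(void)
{
	struct epoch128 e = { .epoch = UINT64_MAX, .epdx = 0 };

	epoch_add(&e, 1);		/* carries into the high word */
	assert(e.epoch == 0 && e.epdx == 1);
	epoch_add(&e, -(uint64_t)1);	/* adding -1 borrows it back */
	assert(e.epoch == UINT64_MAX && e.epdx == 0);
	return 0;
}

The kernel helper additionally guards the epdx update with ECD_MEF, since without the multiple-epoch facility only the 64-bit epoch exists.
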
@@ -194,13 +216,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
 	unsigned long long *delta = v;
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		kvm->arch.epoch -= *delta;
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			vcpu->arch.sie_block->epoch -= *delta;
+			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+			if (i == 0) {
+				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+			}
 			if (vcpu->arch.cputm_enabled)
 				vcpu->arch.cputm_start += *delta;
 			if (vcpu->arch.vsie_block)
-				kvm_clock_sync_scb(vcpu->arch.vsie_block,
+				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
 		}
 	}
 	return NOTIFY_OK;
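
For context: kvm_clock_sync runs off the s390 epoch-delta notifier chain, which fires under stop_machine() when STP synchronization steps the host TOD clock. Elsewhere in kvm-s390.c (unchanged by this patch, quoted from memory) the hookup is roughly:

/* Notifier registered on s390_epoch_delta_notifier from kvm_arch_init()
 * via atomic_notifier_chain_register(); not part of this patch. */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

Because every CPU is held in stop_machine() while the callback runs, the VM-wide epoch can safely be refreshed from vcpu 0's freshly adjusted SIE block, as the hunk above does.
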
@@ -902,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_set_tod_clock_ext(kvm, &gtod);
-	else if (gtod.epoch_idx == 0)
-		kvm_s390_set_tod_clock(kvm, gtod.tod);
-	else
+	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
+	kvm_s390_set_tod_clock(kvm, &gtod);
 
 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		 gtod.epoch_idx, gtod.tod);
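
Seen from userspace, kvm_s390_set_tod_ext() backs the KVM_S390_VM_TOD_EXT attribute. A hedged sketch of the calling side (hypothetical test code; assumes an s390 host whose <linux/kvm.h> provides struct kvm_s390_vm_tod_clock, and an already-open VM file descriptor):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Set the guest's extended TOD. With the unified check above, a
 * non-zero epoch_idx is rejected with -EINVAL when facility 139
 * (multiple-epoch) is not available to the VM. */
static int set_guest_tod_ext(int vm_fd, uint8_t epoch_idx, uint64_t tod)
{
	struct kvm_s390_vm_tod_clock gtod = {
		.epoch_idx = epoch_idx,
		.tod = tod,
	};
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr = KVM_S390_VM_TOD_EXT,
		.addr = (uint64_t)(uintptr_t)&gtod,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
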
@@ -932,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 gtod;
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+			   sizeof(gtod.tod)))
 		return -EFAULT;
 
-	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+	kvm_s390_set_tod_clock(kvm, &gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }
 
@@ -2389,6 +2413,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	mutex_lock(&vcpu->kvm->lock);
 	preempt_disable();
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
 	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -3021,8 +3046,8 @@ retry:
 	return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
@@ -3034,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);
 
 	kvm->arch.epoch = gtod->tod - htod.tod;
-	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-	if (kvm->arch.epoch > gtod->tod)
-		kvm->arch.epdx -= 1;
+	kvm->arch.epdx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+		if (kvm->arch.epoch > gtod->tod)
+			kvm->arch.epdx -= 1;
+	}
 
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
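
The epdx adjustment here is the subtraction counterpart of kvm_clock_sync_scb's carry: for the unsigned difference epoch = gtod->tod - htod.tod, a borrow out of the low 64 bits occurred exactly when the result exceeds the minuend, and it is only accounted for when facility 139 provides the high word. A standalone sketch (userspace C, hypothetical names, not kernel code):

/*
 * Borrow detection for a split 72-bit subtraction: host clock ahead of
 * the requested guest TOD yields a negative (wrapped) epoch, which must
 * propagate a borrow into the high byte.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t guest_tod = 0x10;	/* gtod->tod */
	uint64_t host_tod = 0x20;	/* htod.tod, host is ahead */
	uint8_t guest_idx = 0, host_idx = 0;

	uint64_t epoch = guest_tod - host_tod;	/* wraps to a huge value */
	uint8_t epdx = guest_idx - host_idx;

	if (epoch > guest_tod)	/* borrow out of the low word */
		epdx -= 1;

	/* (epdx:epoch) now encodes the negative offset -0x10 */
	assert(epoch == (uint64_t)-0x10 && epdx == 0xff);
	return 0;
}
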
@@ -3050,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = tod - get_tod_clock();
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu