@@ -928,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_set_tod_clock_ext(kvm, &gtod);
-	else if (gtod.epoch_idx == 0)
-		kvm_s390_set_tod_clock(kvm, gtod.tod);
-	else
+	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
+	kvm_s390_set_tod_clock(kvm, &gtod);
 
 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		 gtod.epoch_idx, gtod.tod);
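
For context, this is how userspace reaches kvm_s390_set_tod_ext() after the
change. A minimal sketch, assuming an s390 build where <linux/kvm.h> pulls in
the arch UAPI definitions and vm_fd is an already-created VM file descriptor;
set_guest_tod() is a hypothetical helper name, not kernel code:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: set the guest TOD (and epoch index) in one call. */
static int set_guest_tod(int vm_fd, uint8_t epoch_idx, uint64_t tod)
{
	struct kvm_s390_vm_tod_clock gtod = {
		.epoch_idx = epoch_idx,
		.tod = tod,
	};
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr = KVM_S390_VM_TOD_EXT,
		.addr = (uint64_t)(unsigned long)&gtod,
	};

	/*
	 * With this patch, the extended attribute also works on machines
	 * without the multiple-epoch facility (139), as long as epoch_idx
	 * is 0; otherwise the ioctl fails with -EINVAL.
	 */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}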
@@ -958,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 gtod;
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+			   sizeof(gtod.tod)))
		return -EFAULT;
 
-	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+	kvm_s390_set_tod_clock(kvm, &gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }
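
For reference, the UAPI structure both handlers now funnel into, as defined in
arch/s390/include/uapi/asm/kvm.h (the field comments are mine):

struct kvm_s390_vm_tod_clock {
	__u8  epoch_idx;	/* 8-bit extension of the epoch, facility 139 */
	__u64 tod;		/* low 64 bits of the guest TOD */
};

The { 0 } initializer in kvm_s390_set_tod_low() matters: only gtod.tod is
copied from userspace, so epoch_idx stays 0 and the request is interpreted as
a plain 64-bit TOD value, preserving the old semantics of this attribute.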
@@ -3048,8 +3046,8 @@ retry:
 	return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
@@ -3061,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);
 
 	kvm->arch.epoch = gtod->tod - htod.tod;
-	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-	if (kvm->arch.epoch > gtod->tod)
-		kvm->arch.epdx -= 1;
+	kvm->arch.epdx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+		if (kvm->arch.epoch > gtod->tod)
+			kvm->arch.epdx -= 1;
+	}
 
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
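
The epoch/epdx pair above acts as one 72-bit signed offset (guest TOD minus
host TOD). gtod->tod - htod.tod is a plain 64-bit subtraction, and when it
wraps, the result compares greater than gtod->tod, so a borrow has to
propagate into the epoch index. A standalone sketch of that rule, illustration
only (compute_epoch() is a hypothetical name, not kernel code):

#include <stdint.h>

static void compute_epoch(uint8_t gidx, uint64_t gtod,
			  uint8_t hidx, uint64_t htod,
			  uint8_t *epdx, uint64_t *epoch)
{
	*epoch = gtod - htod;	/* low 64 bits, may wrap around */
	*epdx = gidx - hidx;	/* high 8 bits */
	if (*epoch > gtod)	/* the subtraction wrapped ... */
		*epdx -= 1;	/* ... so borrow from the index */
}

For example, gidx = hidx = 0, gtod = 1, htod = 2 gives epoch =
0xffffffffffffffff and, via the borrow, epdx = 0xff: together the 72-bit
value -1, i.e. a guest clock one tick behind the host.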
@@ -3077,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = tod - get_tod_clock();
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu