|
@@ -523,19 +523,14 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
|
|
|
{
|
|
|
struct kvm_vcpu *cur_vcpu;
|
|
|
unsigned int vcpu_idx;
|
|
|
- u64 host_tod, gtod;
|
|
|
- int r;
|
|
|
+ u64 gtod;
|
|
|
|
|
|
if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
|
|
|
return -EFAULT;
|
|
|
|
|
|
- r = store_tod_clock(&host_tod);
|
|
|
- if (r)
|
|
|
- return r;
|
|
|
-
|
|
|
mutex_lock(&kvm->lock);
|
|
|
preempt_disable();
|
|
|
- kvm->arch.epoch = gtod - host_tod;
|
|
|
+ kvm->arch.epoch = gtod - get_tod_clock();
|
|
|
kvm_s390_vcpu_block_all(kvm);
|
|
|
kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
|
|
|
cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
|
|
@@ -581,15 +576,10 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
|
|
|
|
|
|
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
|
|
|
{
|
|
|
- u64 host_tod, gtod;
|
|
|
- int r;
|
|
|
-
|
|
|
- r = store_tod_clock(&host_tod);
|
|
|
- if (r)
|
|
|
- return r;
|
|
|
+ u64 gtod;
|
|
|
|
|
|
preempt_disable();
|
|
|
- gtod = host_tod + kvm->arch.epoch;
|
|
|
+ gtod = get_tod_clock() + kvm->arch.epoch;
|
|
|
preempt_enable();
|
|
|
if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
|
|
|
return -EFAULT;
|