@@ -130,6 +130,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
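+/*
+ * The extended TOD-clock value as stored by STCKE: an 8-bit epoch
+ * index, the 64-bit TOD clock and 7 trailing bytes (unused here).
+ */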
+struct kvm_s390_tod_clock_ext {
+	__u8 epoch_idx;
+	__u64 tod;
+	__u8 reserved[7];
+} __packed;
+
 /* allow nested virtualization in KVM (if enabled by user space) */
 static int nested;
 module_param(nested, int, S_IRUGO);
@@ -874,6 +884,30 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm,
 	return 0;
 }
 
+static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_s390_vm_tod_clock gtod;
+
+	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+		return -EFAULT;
+
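+	/*
+	 * Without the multiple-epoch facility (stfle bit 139), only a
+	 * TOD clock within epoch index 0 can be set for the guest.
+	 */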
+	if (test_kvm_facility(kvm, 139))
+		kvm_s390_set_tod_clock_ext(kvm, &gtod);
+	else if (gtod.epoch_idx == 0)
+		kvm_s390_set_tod_clock(kvm, gtod.tod);
+	else
+		return -EINVAL;
+
+	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
+		gtod.epoch_idx, gtod.tod);
+
+	return 0;
+}
+
 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	u8 gtod_high;
@@ -909,6 +943,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 		return -EINVAL;
 
 	switch (attr->attr) {
+	case KVM_S390_VM_TOD_EXT:
+		ret = kvm_s390_set_tod_ext(kvm, attr);
+		break;
 	case KVM_S390_VM_TOD_HIGH:
 		ret = kvm_s390_set_tod_high(kvm, attr);
 		break;
@@ -922,6 +959,45 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 	return ret;
 }
 
+static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
+					struct kvm_s390_vm_tod_clock *gtod)
+{
+	struct kvm_s390_tod_clock_ext htod;
+
+	preempt_disable();
+
+	get_tod_clock_ext((char *)&htod);
+
+	gtod->tod = htod.tod + kvm->arch.epoch;
+	gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
+
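+	/* carry into the epoch index if the TOD addition wrapped around */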
+	if (gtod->tod < htod.tod)
+		gtod->epoch_idx += 1;
+
+	preempt_enable();
+}
+
+static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+	struct kvm_s390_vm_tod_clock gtod;
+
+	memset(&gtod, 0, sizeof(gtod));
+
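+	/* without the multiple-epoch facility, the epoch index stays 0 */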
+	if (test_kvm_facility(kvm, 139))
+		kvm_s390_get_tod_clock_ext(kvm, &gtod);
+	else
+		gtod.tod = kvm_s390_get_tod_clock_fast(kvm);
+
+	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
+		return -EFAULT;
+
+	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
+		gtod.epoch_idx, gtod.tod);
+	return 0;
+}
+
 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	u8 gtod_high = 0;
@@ -954,6 +1030,9 @@ static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 		return -EINVAL;
 
 	switch (attr->attr) {
+	case KVM_S390_VM_TOD_EXT:
+		ret = kvm_s390_get_tod_ext(kvm, attr);
+		break;
 	case KVM_S390_VM_TOD_HIGH:
 		ret = kvm_s390_get_tod_high(kvm, attr);
 		break;
@@ -2369,6 +2448,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->eca |= ECA_VX;
 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
 	}
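+	/* make the multiple-epoch facility available to the guest */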
+	if (test_kvm_facility(vcpu->kvm, 139))
+		vcpu->arch.sie_block->ecd |= ECD_MEF;
+
 	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
 					| SDNXC;
 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
@@ -2855,6 +2938,37 @@ retry:
 	return 0;
 }
 
+void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
+				const struct kvm_s390_vm_tod_clock *gtod)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_s390_tod_clock_ext htod;
+	int i;
+
+	mutex_lock(&kvm->lock);
+	preempt_disable();
+
+	get_tod_clock_ext((char *)&htod);
+
+	kvm->arch.epoch = gtod->tod - htod.tod;
+	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+
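+	/* borrow from the epoch index if the TOD subtraction wrapped around */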
+	if (kvm->arch.epoch > gtod->tod)
+		kvm->arch.epdx -= 1;
+
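+	/* propagate the new epoch to all vcpus while they are blocked */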
+	kvm_s390_vcpu_block_all(kvm);
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
+	}
+
+	kvm_s390_vcpu_unblock_all(kvm);
+	preempt_enable();
+	mutex_unlock(&kvm->lock);
+}
+
 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
 {
 	struct kvm_vcpu *vcpu;
|