@@ -405,6 +405,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_S390_RI:
 		r = test_facility(64);
 		break;
+	case KVM_CAP_S390_GS:
+		r = test_facility(133);
+		break;
 	default:
 		r = 0;
 	}
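On the userspace side, this first hunk is what makes the capability discoverable via KVM_CHECK_EXTENSION. A minimal probe might look like the sketch below; the helper name host_has_gs is illustrative and not part of the patch:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Probe /dev/kvm for the new capability; a positive return from
     * KVM_CHECK_EXTENSION means the handler above saw facility 133. */
    static int host_has_gs(void)
    {
        int kvm_fd = open("/dev/kvm", O_RDWR);
        int r;

        if (kvm_fd < 0)
            return 0;
        r = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_GS);
        close(kvm_fd);
        return r > 0;
    }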
@@ -541,6 +544,20 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
 			 r ? "(not available)" : "(success)");
 		break;
+	case KVM_CAP_S390_GS:
+		r = -EINVAL;
+		mutex_lock(&kvm->lock);
+		if (atomic_read(&kvm->online_vcpus)) {
+			r = -EBUSY;
+		} else if (test_facility(133)) {
+			set_kvm_facility(kvm->arch.model.fac_mask, 133);
+			set_kvm_facility(kvm->arch.model.fac_list, 133);
+			r = 0;
+		}
+		mutex_unlock(&kvm->lock);
+		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
+			 r ? "(not available)" : "(success)");
+		break;
 	case KVM_CAP_S390_USER_STSI:
 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
 		kvm->arch.user_stsi = 1;
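Enabling is a per-VM operation, and the online_vcpus check above means it must happen before the first vCPU is created. A matching userspace sketch, assuming vm_fd came from KVM_CREATE_VM and with the helper name enable_gs invented for illustration:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Enable guarded storage for one VM. Returns 0 on success; the
     * kernel fails the ioctl with errno EBUSY once a vCPU exists and
     * with EINVAL when facility 133 is absent. */
    static int enable_gs(int vm_fd)
    {
        struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_GS };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }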
@@ -1749,6 +1766,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm_s390_set_prefix(vcpu, 0);
 	if (test_kvm_facility(vcpu->kvm, 64))
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+	if (test_kvm_facility(vcpu->kvm, 133))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
 	 */
@@ -1993,6 +2012,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->eca |= ECA_VX;
 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
 	}
+	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
+				      | SDNXC;
 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
@@ -2720,8 +2741,10 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct runtime_instr_cb *riccb;
+	struct gs_cb *gscb;

 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
+	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
@@ -2756,6 +2779,19 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
 	}
+	/*
+	 * If userspace sets the gscb (e.g. after migration) to non-zero,
+	 * we should enable GS here instead of doing the lazy enablement.
+	 */
+	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
+	    test_kvm_facility(vcpu->kvm, 133) &&
+	    gscb->gssm &&
+	    !vcpu->arch.gs_enabled) {
+		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
+		vcpu->arch.sie_block->ecb |= ECB_GS;
+		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
+		vcpu->arch.gs_enabled = 1;
+	}
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	/* save host (userspace) fprs/vrs */
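The new comment describes the migration case: the destination repopulates the gscb in the sync area and marks it dirty, so the next KVM_RUN enables GS eagerly rather than through the lazy path. A destination-side sketch, where run is the vCPU's mmap()ed struct kvm_run and saved_gscb is whatever state the source transferred (both names are assumptions):

    #include <string.h>
    #include <linux/kvm.h>

    /* Destination side of a migration: refill the sync area and flag it
     * dirty so that sync_regs() above sets ECB_GS on the next entry. */
    static void restore_guest_gscb(struct kvm_run *run, const void *saved_gscb)
    {
        memcpy(&run->s.regs.gscb, saved_gscb, sizeof(run->s.regs.gscb));
        run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }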
@@ -2770,6 +2806,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (test_fp_ctl(current->thread.fpu.fpc))
 		/* User space provided an invalid FPC, let's clear it */
 		current->thread.fpu.fpc = 0;
+	if (MACHINE_HAS_GS) {
+		preempt_disable();
+		__ctl_set_bit(2, 4);
+		if (current->thread.gs_cb) {
+			vcpu->arch.host_gscb = current->thread.gs_cb;
+			save_gs_cb(vcpu->arch.host_gscb);
+		}
+		if (vcpu->arch.gs_enabled) {
+			current->thread.gs_cb = (struct gs_cb *)
+						&vcpu->run->s.regs.gscb;
+			restore_gs_cb(current->thread.gs_cb);
+		}
+		preempt_enable();
+	}

 	kvm_run->kvm_dirty_regs = 0;
 }
@@ -2796,6 +2846,18 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	/* Restore will be done lazily at return */
 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+	if (MACHINE_HAS_GS) {
+		__ctl_set_bit(2, 4);
+		if (vcpu->arch.gs_enabled)
+			save_gs_cb(current->thread.gs_cb);
+		preempt_disable();
+		current->thread.gs_cb = vcpu->arch.host_gscb;
+		restore_gs_cb(vcpu->arch.host_gscb);
+		preempt_enable();
+		if (!vcpu->arch.host_gscb)
+			__ctl_clear_bit(2, 4);
+		vcpu->arch.host_gscb = NULL;
+	}

 }
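The last two hunks bracket their save/restore sequences with __ctl_set_bit(2, 4) because the guarded-storage instructions behind save_gs_cb()/restore_gs_cb() fault unless the enablement bit in control register 2 is set. For readers unfamiliar with the helper, the sketch below paraphrases what it amounts to; it is a reconstruction, not code from this patch:

    /* Reconstruction of the s390 helper for illustration: read control
     * register 'cr', set 'bit' counted from the least-significant end,
     * and write it back. __ctl_store/__ctl_load wrap the STCTG/LCTLG
     * instructions and exist only in kernel context. */
    static inline void ctl_set_bit_sketch(unsigned int cr, unsigned int bit)
    {
        unsigned long reg;

        __ctl_store(reg, cr, cr);
        reg |= 1UL << bit;
        __ctl_load(reg, cr, cr);
    }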