@@ -1989,10 +1989,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	rc = -ENOMEM;
 
-	kvm->arch.use_esca = 0; /* start with basic SCA */
 	if (!sclp.has_64bscao)
 		alloc_flags |= GFP_DMA;
 	rwlock_init(&kvm->arch.sca_lock);
+	/* start with basic SCA */
 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
 	if (!kvm->arch.sca)
 		goto out_err;
@@ -2043,8 +2043,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_s390_crypto_init(kvm);
 
 	mutex_init(&kvm->arch.float_int.ais_lock);
-	kvm->arch.float_int.simm = 0;
-	kvm->arch.float_int.nimm = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -2070,12 +2068,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		kvm->arch.gmap->pfault_enabled = 0;
 	}
 
-	kvm->arch.css_support = 0;
-	kvm->arch.use_irqchip = 0;
 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
 	kvm->arch.use_skf = sclp.has_skey;
-	kvm->arch.epoch = 0;
-
 	spin_lock_init(&kvm->arch.start_stop_lock);
 	kvm_s390_vsie_init(kvm);
 	kvm_s390_gisa_init(kvm);