@@ -110,7 +110,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 /* upper facilities limit for kvm */
 unsigned long kvm_s390_fac_list_mask[] = {
 	0xffe6fffbfcfdfc40UL,
-	0x005c800000000000UL,
+	0x005e800000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
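Note: the mask update flips exactly one bit: 0x005e800000000000 ^ 0x005c800000000000 = 0x0002000000000000, i.e. bit 14 of this second doubleword in the MSB-0 numbering STFLE uses, which is facility bit 64 + 14 = 78. That is the same facility tested via test_kvm_facility(vcpu->kvm, 78) in kvm_arch_vcpu_setup() further down.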
@@ -454,10 +454,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 
 	mutex_lock(&kvm->lock);
 	kvm->arch.epoch = gtod - host_tod;
-	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+	kvm_s390_vcpu_block_all(kvm);
+	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-		exit_sie(cur_vcpu);
-	}
+	kvm_s390_vcpu_unblock_all(kvm);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
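Note: kvm_s390_vcpu_block_all()/kvm_s390_vcpu_unblock_all() are not defined in any hunk of this diff; presumably they are small inline helpers (likely in kvm-s390.h) that apply the per-vCPU block/unblock to every vCPU while kvm->lock is held. A minimal sketch under that assumption:

/* sketch only -- the real helpers live outside this diff */
static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));	/* callers hold kvm->lock */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

exit_sie() alone kicks a vCPU out of SIE but does not prevent immediate reentry; blocking all vCPUs around the epoch update ensures every vCPU observes the new TOD epoch before any of them reenters SIE, so the guest never sees an inconsistent clock across CPUs.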
@@ -1311,8 +1311,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
 						    CPUSTAT_SM |
-						    CPUSTAT_STOPPED |
-						    CPUSTAT_GED);
+						    CPUSTAT_STOPPED);
+
+	if (test_kvm_facility(vcpu->kvm, 78))
+		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+	else if (test_kvm_facility(vcpu->kvm, 8))
+		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+
 	kvm_s390_vcpu_setup_model(vcpu);
 
 	vcpu->arch.sie_block->ecb = 6;
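Note: test_kvm_facility() is defined elsewhere; conceptually it reports whether a facility is made available to the guest, i.e. the bit must be on both in kvm_s390_fac_list_mask above and in the CPU model's facility list. A sketch under that assumption (helper and field names assumed, not taken from this diff):

/* sketch only -- actual definition lives outside this diff */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac->mask) &&
	       __test_facility(nr, kvm->arch.model.fac->list);
}

So a guest gets GED2 (facility 78) only if host and model both offer it; otherwise it falls back to plain GED (facility 8), and to neither if even that is absent, instead of unconditionally setting CPUSTAT_GED as before.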
@@ -1409,16 +1414,26 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
-void s390_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
+static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
+{
+	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
+static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
+{
+	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
 /*
  * Kick a guest cpu out of SIE and wait until SIE is not running.
  * If the CPU is not running (e.g. waiting as idle) the function will
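Note: PROG_BLOCK_SIE and the new PROG_REQUEST are bits in sie_block->prog20 that the SIE entry path tests before (re)entering the guest. Presumably they are defined along these lines (values and header location assumed):

/* bits in vcpu->arch.sie_block->prog20 (assumed definitions) */
#define PROG_BLOCK_SIE	(1 << 0)  /* SIE entry blocked until explicitly unblocked */
#define PROG_REQUEST	(1 << 1)  /* SIE entry blocked until pending requests are handled */

The split matters: PROG_BLOCK_SIE stays a plain block/unblock toggle for external users, while PROG_REQUEST couples "do not reenter SIE" to "there is a vcpu->requests bit that must be processed first".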
@@ -1430,10 +1445,11 @@ void exit_sie(struct kvm_vcpu *vcpu)
 		cpu_relax();
 }
 
-/* Kick a guest cpu out of SIE and prevent SIE-reentry */
-void exit_sie_sync(struct kvm_vcpu *vcpu)
+/* Kick a guest cpu out of SIE to process a request synchronously */
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-	s390_vcpu_block(vcpu);
+	kvm_make_request(req, vcpu);
+	kvm_s390_vcpu_request(vcpu);
 	exit_sie(vcpu);
 }
 
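For reference, exit_sie() itself is unchanged here (only its tail shows as context above); it presumably still requests a stop interception and spins until the vCPU has actually left SIE, roughly:

/* sketch of the pre-existing exit_sie(), reconstructed for context */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

The ordering in kvm_s390_sync_request() is deliberate: the request bit and PROG_REQUEST are both set before the kick, so once exit_sie() returns, the vCPU cannot slip back into SIE until kvm_s390_vcpu_request_handled() has run.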
@@ -1447,8 +1463,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
 		/* match against both prefix pages */
 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
-			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
-			exit_sie_sync(vcpu);
+			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 		}
 	}
 }
@@ -1720,8 +1735,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
+	if (!vcpu->requests)
+		return 0;
 retry:
-	s390_vcpu_unblock(vcpu);
+	kvm_s390_vcpu_request_handled(vcpu);
 	/*
 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
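Note the ordering on the consumer side: PROG_REQUEST is cleared before vcpu->requests is inspected, mirroring the producer, which sets the request before setting PROG_REQUEST. A request that arrives between the clear and the check therefore still blocks the next SIE entry and gets handled on the retry. Condensed, the handler follows this pattern (bodies abridged):

/* abridged sketch of kvm_s390_handle_requests() after this change */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!vcpu->requests)
		return 0;			/* fast path: nothing pending */
retry:
	kvm_s390_vcpu_request_handled(vcpu);	/* clear PROG_REQUEST first */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		/* re-arm the ipte notifier for the guest prefix page */
		goto retry;
	}
	/* further requests (ENABLE_IBS, DISABLE_IBS, ...) handled similarly */
	return 0;
}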
@@ -2208,8 +2225,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
-	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
-	exit_sie_sync(vcpu);
+	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 }
 
 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
@@ -2225,8 +2241,7 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
-	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
-	exit_sie_sync(vcpu);
+	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
 }
 
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
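__disable_ibs_on_all_vcpus() appears only as context here; going by its name and call sites it presumably just fans the per-vCPU helper out over all vCPUs:

/* sketch only -- body not part of this diff */
static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		__disable_ibs_on_vcpu(vcpu);
}

Taken together, every "kick a vCPU and make it act on something" path now funnels through kvm_s390_sync_request(), which pairs kvm_make_request() with PROG_REQUEST, so a request can no longer be lost between the kick and the next SIE entry.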