@@ -169,8 +169,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+	const u64 ckc = vcpu->arch.sie_block->ckc;
+
+	if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+		if ((s64)ckc >= (s64)now)
+			return 0;
+	} else if (ckc >= now) {
 		return 0;
+	}
 	return ckc_interrupts_enabled(vcpu);
 }
 
@@ -1047,13 +1054,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 
 static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 {
-	u64 now, cputm, sltime = 0;
+	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+	const u64 ckc = vcpu->arch.sie_block->ckc;
+	u64 cputm, sltime = 0;
 
 	if (ckc_interrupts_enabled(vcpu)) {
-		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-		/* already expired or overflow? */
-		if (!sltime || vcpu->arch.sie_block->ckc <= now)
+		if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+			if ((s64)now < (s64)ckc)
+				sltime = tod_to_ns((s64)ckc - (s64)now);
+		} else if (now < ckc) {
+			sltime = tod_to_ns(ckc - now);
+		}
+		/* already expired */
+		if (!sltime)
 			return 0;
 		if (cpu_timer_interrupts_enabled(vcpu)) {
 			cputm = kvm_s390_get_cpu_timer(vcpu);
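
For illustration only (not part of the patch): a minimal user-space sketch of why the comparison has to become signed once the guest sets the CR0 bit tested above (mask 0x0020000000000000ul, the clock-comparator sign control). When the TOD clock has bit 0 set, an unsigned compare treats a small clock-comparator value as already expired, while the signed interpretation the guest asked for still places it in the future. The helper name, main() and the sample values below are made up for the example.

/* sketch only: ckc_expired() and the sample values are hypothetical */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool ckc_expired(uint64_t ckc, uint64_t now, bool sign_control)
{
	if (sign_control)
		return (int64_t)ckc < (int64_t)now;	/* signed compare, as in the new branch */
	return ckc < now;				/* old behaviour: unsigned compare */
}

int main(void)
{
	const uint64_t now = 0x8000000000000123ULL;	/* TOD value with bit 0 set */
	const uint64_t ckc = 0x0000000000001000ULL;	/* small clock-comparator value */

	printf("unsigned compare: expired=%d\n", ckc_expired(ckc, now, false));	/* prints 1 */
	printf("signed compare:   expired=%d\n", ckc_expired(ckc, now, true));		/* prints 0 */
	return 0;
}

With the old unsigned checks, such a guest would see its clock-comparator wakeup fire immediately (ckc_irq_pending() reports a pending interrupt and __calculate_sltime() returns 0) even though, under the sign control it enabled, the comparator still lies in the future; the (s64) casts in both hunks keep the two interpretations apart.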