@@ -107,12 +107,11 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 
 static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 {
-	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc, expect;
 
 	if (!kvm_s390_use_sca_entries())
 		return;
-	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -279,13 +278,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
 	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
-	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
 	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
-	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@@ -1228,7 +1227,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1253,7 +1252,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1329,7 +1328,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1373,7 +1372,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1386,7 +1385,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1546,7 +1545,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 static void __floating_irq_kick(struct kvm *kvm, u64 type)
 {
 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
-	struct kvm_s390_local_interrupt *li;
 	struct kvm_vcpu *dst_vcpu;
 	int sigcpu, online_vcpus, nr_tries = 0;
 
@@ -1568,16 +1566,15 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
 
 	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
-	li = &dst_vcpu->arch.local_int;
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
+		__set_cpuflag(dst_vcpu, CPUSTAT_STOP_INT);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
+		__set_cpuflag(dst_vcpu, CPUSTAT_IO_INT);
 		break;
 	default:
-		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+		__set_cpuflag(dst_vcpu, CPUSTAT_EXT_INT);
 		break;
 	}
 	kvm_s390_vcpu_wakeup(dst_vcpu);
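
Note: the __set_cpuflag() helper used by the + lines above is introduced outside this excerpt. As a hypothetical sketch only, it is assumed to do what the removed open-coded atomic_or() calls did, operating directly on the SIE control block (the same access pattern the sca_clear_ext_call() hunk switches to):

/*
 * Sketch of the assumed helper -- not part of this diff. It ORs the
 * requested bits into the vcpu's SIE control block cpuflags, replacing
 * the indirection through the (now dropped) local_int cpuflags pointer.
 */
static inline void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}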