@@ -749,11 +749,19 @@ next:
 		vgic_clear_lr(vcpu, count);
 }
 
+static inline void vgic_save_state(struct kvm_vcpu *vcpu)
+{
+	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		vgic_v2_save_state(vcpu);
+}
+
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+	vgic_save_state(vcpu);
+
 	WARN_ON(vgic_v4_sync_hwstate(vcpu));
 
 	/* An empty ap_list_head implies used_lrs == 0 */
@@ -765,6 +773,12 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	vgic_prune_ap_list(vcpu);
 }
 
+static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
+{
+	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		vgic_v2_restore_state(vcpu);
+}
+
 /* Flush our emulation state into the GIC hardware before entering the guest. */
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -780,13 +794,16 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * this.
 	 */
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
-		return;
+		goto out;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 	vgic_flush_lr_state(vcpu);
 	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+
+out:
+	vgic_restore_state(vcpu);
 }
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu)
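
For reference, below is a minimal sketch of how the tail of kvm_vgic_flush_hwstate() reads once the last hunk is applied, assembled only from that hunk's context and added lines; the earlier part of the function is not shown in this patch and is elided here. It illustrates why the early "return" becomes "goto out": the new vgic_restore_state() call must run on every exit path, including the one where the ap_list is empty.

/*
 * Sketch (not part of the patch): tail of kvm_vgic_flush_hwstate()
 * after applying the hunk above.
 */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/* ... earlier checks elided; not shown in this hunk ... */

	/*
	 * With an empty ap_list there are no LRs to program, but the
	 * GICv2 state handled by vgic_restore_state() must still be
	 * restored before entering the guest, so fall through to the
	 * out label instead of returning early.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		goto out;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

out:
	vgic_restore_state(vcpu);
}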