@@ -23,29 +23,19 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
-{
-	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
-	u32 elrsr0, elrsr1;
-
-	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
-	if (unlikely(nr_lr > 32))
-		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
-	else
-		elrsr1 = 0;
-
-	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-}
-
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	int i;
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 elrsr;
+	int i;
+
+	elrsr = readl_relaxed(base + GICH_ELRSR0);
+	if (unlikely(used_lrs > 32))
+		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;
 
 	for (i = 0; i < used_lrs; i++) {
-		if (cpu_if->vgic_elrsr & (1UL << i))
+		if (elrsr & (1UL << i))
 			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
 		else
 			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
@@ -68,13 +58,9 @@ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
 
 	if (used_lrs) {
 		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
-
-		save_elrsr(vcpu, base);
 		save_lrs(vcpu, base);
-
 		writel_relaxed(0, base + GICH_HCR);
 	} else {
-		cpu_if->vgic_elrsr = ~0UL;
 		cpu_if->vgic_apr = 0;
 	}
 }
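
For reference, here is a sketch of save_lrs() as it reads once the hunks above
are applied. It is reconstructed from the diff itself (including the closing
braces the first hunk's context cuts off); being __hyp_text kernel code, it
only builds in-tree against the kvm/arm headers, not as a standalone program:

static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 elrsr;
	int i;

	/* Read the empty-LR status straight from the GICH_ELRSRn registers. */
	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		if (elrsr & (1UL << i))
			/* Empty LR: just drop the stale state bits. */
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			/* Occupied LR: save the full register contents. */
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
	}
}

The net effect of the change is that the ELRSR read moves into save_lrs(),
GICH_ELRSR1 is read only when more than 32 LRs are actually in use (bounded by
used_lrs rather than the global nr_lr), and the cached cpu_if->vgic_elrsr
field is no longer written on the save path.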