@@ -209,15 +209,15 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
-	u64 val;
 
 	/*
 	 * Make sure stores to the GIC via the memory mapped interface
-	 * are now visible to the system register interface.
+	 * are now visible to the system register interface when reading the
+	 * LRs, and when reading back the VMCR on non-VHE systems.
 	 */
-	if (!cpu_if->vgic_sre) {
-		dsb(st);
-		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+	if (used_lrs || !has_vhe()) {
+		if (!cpu_if->vgic_sre)
+			dsb(st);
 	}
 
 	if (used_lrs) {
@@ -226,7 +226,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
 		elrsr = read_gicreg(ICH_ELSR_EL2);
 
-		write_gicreg(0, ICH_HCR_EL2);
+		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);
 
 		for (i = 0; i < used_lrs; i++) {
 			if (elrsr & (1 << i))
@@ -236,19 +236,6 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
 			__gic_v3_set_lr(0, i);
 		}
-	} else {
-		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
-		    cpu_if->its_vpe.its_vm)
-			write_gicreg(0, ICH_HCR_EL2);
-	}
-
-	val = read_gicreg(ICC_SRE_EL2);
-	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-
-	if (!cpu_if->vgic_sre) {
-		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-		isb();
-		write_gicreg(1, ICC_SRE_EL1);
 	}
 }
 
@@ -258,6 +245,31 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	int i;
 
+	if (used_lrs) {
+		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+
+		for (i = 0; i < used_lrs; i++)
+			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
+	}
+
+	/*
+	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
+	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
+	 * reached the (re)distributors. This ensures the guest will read the
+	 * correct values from the memory-mapped interface.
+	 */
+	if (used_lrs || !has_vhe()) {
+		if (!cpu_if->vgic_sre) {
+			isb();
+			dsb(sy);
+		}
+	}
+}
+
+void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
 	/*
 	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
 	 * Group0 interrupt (as generated in GICv2 mode) to be
@@ -265,47 +277,69 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * consequences. So we must make sure that ICC_SRE_EL1 has
 	 * been actually programmed with the value we want before
 	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
-	 * particular.
+	 * particular. This logic must be called before
+	 * __vgic_v3_restore_state().
 	 */
 	if (!cpu_if->vgic_sre) {
 		write_gicreg(0, ICC_SRE_EL1);
 		isb();
 		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
-	}
 
-	if (used_lrs) {
-		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
-		for (i = 0; i < used_lrs; i++)
-			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
-	} else {
-		/*
-		 * If we need to trap system registers, we must write
-		 * ICH_HCR_EL2 anyway, even if no interrupts are being
-		 * injected. Same thing if GICv4 is used, as VLPI
-		 * delivery is gated by ICH_HCR_EL2.En.
-		 */
-		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
-		    cpu_if->its_vpe.its_vm)
-			write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+		if (has_vhe()) {
+			/*
+			 * Ensure that the write to the VMCR will have reached
+			 * the (re)distributors. This ensures the guest will
+			 * read the correct values from the memory-mapped
+			 * interface.
+			 */
+			isb();
+			dsb(sy);
+		}
 	}
 
 	/*
-	 * Ensures that the above will have reached the
-	 * (re)distributors. This ensure the guest will read the
-	 * correct values from the memory-mapped interface.
+	 * Prevent the guest from touching the GIC system registers if
+	 * SRE isn't enabled for GICv3 emulation.
 	 */
+	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+		     ICC_SRE_EL2);
+
+	/*
+	 * If we need to trap system registers, we must write
+	 * ICH_HCR_EL2 anyway, even if no interrupts are being
+	 * injected.
+	 */
+	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+	    cpu_if->its_vpe.its_vm)
+		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+}
+
+void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u64 val;
+
 	if (!cpu_if->vgic_sre) {
+		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+	}
+
+	val = read_gicreg(ICC_SRE_EL2);
+	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
+
+	if (!cpu_if->vgic_sre) {
+		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
 		isb();
-		dsb(sy);
+		write_gicreg(1, ICC_SRE_EL1);
 	}
 
 	/*
-	 * Prevent the guest from touching the GIC system registers if
-	 * SRE isn't enabled for GICv3 emulation.
+	 * If we were trapping system registers, we enabled the VGIC even if
+	 * no interrupts were being injected, and we disable it again here.
 	 */
-	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
-		     ICC_SRE_EL2);
+	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+	    cpu_if->its_vpe.its_vm)
+		write_gicreg(0, ICH_HCR_EL2);
 }
 
 void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
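
The new comment in __vgic_v3_activate_traps() pins down an ordering requirement between the split-out helpers: the trap/VMCR programming must happen before the LRs are loaded, and symmetrically the LRs must be saved before the traps are torn down. The sketch below is an illustration only and is not part of this patch: the caller names are invented, and the real call sites (world switch vs. vcpu load/put) are outside the hunks shown here.

/*
 * Illustrative sketch (assumed callers, not from this patch): the ordering
 * the helpers expect, per the comments added above.
 */
static void example_vgic_v3_guest_enter(struct kvm_vcpu *vcpu)
{
	__vgic_v3_activate_traps(vcpu);		/* ICC_SRE_EL1/ICH_VMCR_EL2 first */
	__vgic_v3_restore_state(vcpu);		/* then ICH_HCR_EL2 and the LRs */
}

static void example_vgic_v3_guest_exit(struct kvm_vcpu *vcpu)
{
	__vgic_v3_save_state(vcpu);		/* read back the LRs while traps are active */
	__vgic_v3_deactivate_traps(vcpu);	/* then save VMCR and restore ICC_SRE_EL2/EL1 */
}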