@@ -86,6 +86,22 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
 	}
 }
 
+static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+{
+	/*
+	 * Don't let L1 enable features for L2 which we've disabled for L1,
+	 * but preserve the interrupt cause field.
+	 */
+	hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
+
+	/* Don't let data address watchpoint match in hypervisor state */
+	hr->dawrx0 &= ~DAWRX_HYP;
+
+	/* Don't let completed instruction address breakpt match in HV state */
+	if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+		hr->ciabr &= ~CIABR_PRIV;
+}
+
 static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -198,6 +214,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
 		LPCR_LPES | LPCR_MER;
 	lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
+	sanitise_hv_regs(vcpu, &l2_hv);
 	restore_hv_regs(vcpu, &l2_hv);
 
 	vcpu->arch.ret = RESUME_GUEST;