@@ -189,6 +189,7 @@ struct vcpu_svm {
 	struct nested_state nested;
 
 	bool nmi_singlestep;
+	u64 nmi_singlestep_guest_rflags;
 
 	unsigned int3_injected;
 	unsigned long int3_rip;
@@ -966,9 +967,13 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
 static void disable_nmi_singlestep(struct vcpu_svm *svm)
 {
 	svm->nmi_singlestep = false;
-	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
-		svm->vmcb->save.rflags &=
-			~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
+		/* Clear our flags if they were not set by the guest */
+		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
+			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
+		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
+			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
+	}
 }
 
 /* Note:
@@ -2538,6 +2543,31 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
 }
 
+/* DB exceptions for our internal use must not cause vmexit */
+static int nested_svm_intercept_db(struct vcpu_svm *svm)
+{
+	unsigned long dr6;
+
+	/* if we're not singlestepping, it's not ours */
+	if (!svm->nmi_singlestep)
+		return NESTED_EXIT_DONE;
+
+	/* if it's not a singlestep exception, it's not ours */
+	if (kvm_get_dr(&svm->vcpu, 6, &dr6))
+		return NESTED_EXIT_DONE;
+	if (!(dr6 & DR6_BS))
+		return NESTED_EXIT_DONE;
+
+	/* if the guest is singlestepping, it should get the vmexit */
+	if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
+		disable_nmi_singlestep(svm);
+		return NESTED_EXIT_DONE;
+	}
+
+	/* it's ours, the nested hypervisor must not see this one */
+	return NESTED_EXIT_HOST;
+}
+
 static int nested_svm_exit_special(struct vcpu_svm *svm)
 {
 	u32 exit_code = svm->vmcb->control.exit_code;
@@ -2593,8 +2623,12 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 	}
 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
-		if (svm->nested.intercept_exceptions & excp_bits)
-			vmexit = NESTED_EXIT_DONE;
+		if (svm->nested.intercept_exceptions & excp_bits) {
+			if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
+				vmexit = nested_svm_intercept_db(svm);
+			else
+				vmexit = NESTED_EXIT_DONE;
+		}
 		/* async page fault always cause vmexit */
 		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
 			 svm->apf_reason != 0)
@@ -4635,6 +4669,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	 * Something prevents NMI from been injected. Single step over possible
 	 * problem (IRET or exception injection or interrupt shadow)
 	 */
+	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 }