@@ -882,6 +882,24 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 }
 #endif
 
+static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		u64 msr = kvmppc_get_msr(vcpu);
+
+		kvmppc_set_msr(vcpu, msr | MSR_SE);
+	}
+}
+
+static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		u64 msr = kvmppc_get_msr(vcpu);
+
+		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
+	}
+}
+
 int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			  unsigned int exit_nr)
 {
@@ -1207,10 +1225,18 @@ program_interrupt:
 		break;
 #endif
 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
-	case BOOK3S_INTERRUPT_TRACE:
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		r = RESUME_GUEST;
 		break;
+	case BOOK3S_INTERRUPT_TRACE:
+		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+			run->exit_reason = KVM_EXIT_DEBUG;
+			r = RESUME_HOST;
+		} else {
+			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+			r = RESUME_GUEST;
+		}
+		break;
 	default:
 	{
 		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
@@ -1479,6 +1505,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
+	kvmppc_setup_debug(vcpu);
+
 	/*
 	 * Interrupts could be timers for the guest which we have to inject
 	 * again, so let's postpone them until we're in the guest and if we
@@ -1501,6 +1529,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
+	kvmppc_clear_debug(vcpu);
+
 	/* No need for kvm_guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */
 