KVM: PPC: Move prepare_to_enter call site into subarch code

This function should be called with interrupts disabled, to avoid a race
where an exception is delivered after we check for pending exceptions, but
the resched kick arrives before interrupts are disabled (and thus never
triggers the exit path that would recheck for exceptions).
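
A minimal sketch of the race-free entry ordering (simplified from the
booke diff below; the __kvmppc_vcpu_run() guest-entry call and the
surrounding error handling are illustrative, not quoted from the tree):

	local_irq_disable();                 /* close the race window first */
	kvmppc_core_prepare_to_enter(vcpu);  /* deliver pending exceptions  */
	if (signal_pending(current)) {
		/* Recheck with interrupts still off, so a resched kick
		 * cannot slip in between the check and guest entry. */
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
	} else {
		ret = __kvmppc_vcpu_run(kvm_run, vcpu);
	}
	local_irq_enable();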

booke already does this properly in the lightweight exit case, but
not on initial entry.

For now, move the call to prepare_to_enter into subarch-specific code so
that booke can do the right thing here.  Ideally book3s would do the same
thing, but I'm having a hard time seeing where it does any interrupt
disabling of this sort (plus it has several additional call sites), so
I'm deferring the book3s fix to someone more familiar with that code.
book3s behavior should be unchanged by this patch.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Scott Wood committed 14 years ago
Commit 25051b5a5a
4 files changed, 8 insertions(+), 2 deletions(-)
 arch/powerpc/kvm/book3s_hv.c | 2 ++
 arch/powerpc/kvm/book3s_pr.c | 2 ++
 arch/powerpc/kvm/booke.c     | 4 ++++
 arch/powerpc/kvm/powerpc.c   | 2 --

+ 2 - 0
arch/powerpc/kvm/book3s_hv.c

@@ -836,6 +836,8 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
+	kvmppc_core_prepare_to_enter(vcpu);
+
 	/* No need to go into the guest when all we'll do is come back out */
 	if (signal_pending(current)) {
 		run->exit_reason = KVM_EXIT_INTR;

+ 2 - 0
arch/powerpc/kvm/book3s_pr.c

@@ -929,6 +929,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
+	kvmppc_core_prepare_to_enter(vcpu);
+
 	/* No need to go into the guest when all we do is going out */
 	if (signal_pending(current)) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;

+ 4 - 0
arch/powerpc/kvm/booke.c

@@ -295,6 +295,8 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 	unsigned long old_pending = vcpu->arch.pending_exceptions;
 	unsigned int priority;
 
+	WARN_ON_ONCE(!irqs_disabled());
+
 	priority = __ffs(*pending);
 	while (priority <= BOOKE_IRQPRIO_MAX) {
 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -323,6 +325,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	local_irq_disable();
 
+	kvmppc_core_prepare_to_enter(vcpu);
+
 	if (signal_pending(current)) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;
 		ret = -EINTR;
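
The WARN_ON_ONCE(!irqs_disabled()) added above turns the new calling
convention into a checked assertion. A minimal sketch of the pattern in
isolation (the helper name is hypothetical; irqs_disabled() and
WARN_ON_ONCE() are standard kernel primitives):

	#include <linux/bug.h>
	#include <linux/irqflags.h>

	/* Hypothetical helper that, like kvmppc_core_prepare_to_enter()
	 * after this patch, must be entered with interrupts disabled. */
	static void must_run_with_irqs_off(void)
	{
		/* Warns with a backtrace the first time the precondition
		 * is violated, then stays silent on later calls - cheap
		 * enough to leave in a hot path such as guest entry. */
		WARN_ON_ONCE(!irqs_disabled());
	}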

+ 0 - 2
arch/powerpc/kvm/powerpc.c

@@ -559,8 +559,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu->arch.hcall_needed = 0;
 	}
 
-	kvmppc_core_prepare_to_enter(vcpu);
-
 	r = kvmppc_vcpu_run(run, vcpu);
 
 	if (vcpu->sigset_active)
 	if (vcpu->sigset_active)