
KVM: Close minor race in signal handling

We need to check for signals inside the critical section; otherwise a
signal can be sent that we will not notice.  Also, move the check
before guest entry, so that if the signal arrives before the first
entry we exit immediately instead of waiting for something to happen
to the guest.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Avi Kivity 18 years ago
parent
commit
7e66f350cf
2 changed files with 22 additions and 20 deletions
  1. drivers/kvm/svm.c  +10 -9
  2. drivers/kvm/vmx.c  +12 -11
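
In essence, the patch moves the signal check inside the clgi()/stgi()
(respectively local_irq_disable()/local_irq_enable()) critical section and
ahead of guest entry. A condensed sketch of the new ordering on the SVM side,
taken from the patched svm_vcpu_run() below (not the complete function; the
VMX path is analogous):

	clgi();					/* block interrupt delivery: critical section begins */

	if (signal_pending(current)) {		/* a signal is already queued for this thread */
		stgi();				/* re-enable interrupt delivery */
		++vcpu->stat.signal_exits;
		post_kvm_run_save(svm, kvm_run);
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;			/* return to userspace immediately */
	}

	/* ... inject pending interrupts, enter the guest, handle the exit ... */

Because the check now happens with interrupt delivery blocked and before the
guest runs, a signal sent after the check can no longer slip by unnoticed, and
a signal already pending before the first entry makes the vcpu return to
userspace right away instead of waiting for guest activity.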

drivers/kvm/svm.c  +10 -9

@@ -1398,11 +1398,19 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (unlikely(r))
 		return r;
 
+	clgi();
+
+	if (signal_pending(current)) {
+		stgi();
+		++vcpu->stat.signal_exits;
+		post_kvm_run_save(svm, kvm_run);
+		kvm_run->exit_reason = KVM_EXIT_INTR;
+		return -EINTR;
+	}
+
 	if (!vcpu->mmio_read_completed)
 		do_interrupt_requests(svm, kvm_run);
 
-	clgi();
-
 	vcpu->guest_mode = 1;
 	if (vcpu->requests)
 		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
@@ -1582,13 +1590,6 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	r = handle_exit(svm, kvm_run);
 	if (r > 0) {
-		if (signal_pending(current)) {
-			++vcpu->stat.signal_exits;
-			post_kvm_run_save(svm, kvm_run);
-			kvm_run->exit_reason = KVM_EXIT_INTR;
-			return -EINTR;
-		}
-
 		if (dm_request_for_irq_injection(svm, kvm_run)) {
 			++vcpu->stat.request_irq_exits;
 			post_kvm_run_save(svm, kvm_run);

drivers/kvm/vmx.c  +12 -11

@@ -2066,9 +2066,6 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	preempt_disable();
 
-	if (!vcpu->mmio_read_completed)
-		do_interrupt_requests(vcpu, kvm_run);
-
 	vmx_save_host_state(vmx);
 	kvm_load_guest_fpu(vcpu);
 
@@ -2079,6 +2076,18 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	local_irq_disable();
 
+	if (signal_pending(current)) {
+		local_irq_enable();
+		preempt_enable();
+		r = -EINTR;
+		kvm_run->exit_reason = KVM_EXIT_INTR;
+		++vcpu->stat.signal_exits;
+		goto out;
+	}
+
+	if (!vcpu->mmio_read_completed)
+		do_interrupt_requests(vcpu, kvm_run);
+
 	vcpu->guest_mode = 1;
 	if (vcpu->requests)
 		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
@@ -2227,14 +2236,6 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	r = kvm_handle_exit(kvm_run, vcpu);
 	if (r > 0) {
-		/* Give scheduler a change to reschedule. */
-		if (signal_pending(current)) {
-			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
-			++vcpu->stat.signal_exits;
-			goto out;
-		}
-
 		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
 			r = -EINTR;
 			kvm_run->exit_reason = KVM_EXIT_INTR;