Browse Source

KVM: x86: use guest_exit_irqoff

This gains a few clock cycles per vmexit.  On Intel there is no need
anymore to enable the interrupts in vmx_handle_external_intr, since
we are using the "acknowledge interrupt on exit" feature.  AMD
needs to do that, and must be careful to avoid the interrupt shadow.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini 9 years ago
parent
commit
f2485b3e0c
3 changed files with 9 additions and 12 deletions
  1. 6 0
      arch/x86/kvm/svm.c
  2. 1 3
      arch/x86/kvm/vmx.c
  3. 2 9
      arch/x86/kvm/x86.c

+ 6 - 0
arch/x86/kvm/svm.c

@@ -4935,6 +4935,12 @@ out:
 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
 {
 	local_irq_enable();
+	/*
+	 * We must have an instruction with interrupts enabled, so
+	 * the timer interrupt isn't delayed by the interrupt shadow.
+	 */
+	asm("nop");
+	local_irq_disable();
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)

+ 1 - 3
arch/x86/kvm/vmx.c

@@ -8574,7 +8574,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 			"push %[sp]\n\t"
 #endif
 			"pushf\n\t"
-			"orl $0x200, (%%" _ASM_SP ")\n\t"
 			__ASM_SIZE(push) " $%c[cs]\n\t"
 			"call *%[entry]\n\t"
 			:
@@ -8587,8 +8586,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 			[ss]"i"(__KERNEL_DS),
 			[cs]"i"(__KERNEL_CS)
 			);
-	} else
-		local_irq_enable();
+	}
 }
 
 static bool vmx_has_high_real_mode_segbase(void)

+ 2 - 9
arch/x86/kvm/x86.c

@@ -6709,16 +6709,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 
 	++vcpu->stat.exits;
 
-	/*
-	 * We must have an instruction between local_irq_enable() and
-	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
-	 * the interrupt shadow.  The stat.exits increment will do nicely.
-	 * But we need to prevent reordering, hence this barrier():
-	 */
-	barrier();
-
-	guest_exit();
+	guest_exit_irqoff();
 
+	local_irq_enable();
 	preempt_enable();
 
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);