소스 검색

KVM: x86: optimize steal time calculation

Since accumulate_steal_time is now only called in record_steal_time, it
doesn't quite make sense to put the delta calculation in a separate
function. The function could be called thousands of times before guest
enables the steal time MSR (though the compiler may optimize out this
function call). And after it's enabled, the MSR enable bit is tested twice
every time. Removing the accumulate_steal_time function also avoids the
necessity of having the accum_steal field.

Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
Signed-off-by: Gavin Guo <gavin.guo@canonical.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Liang Chen 9 년 전
부모
커밋
c54cdf141c
2개의 변경된 파일, 3개의 추가작업 그리고 17개의 삭제작업
  1. 0 1
      arch/x86/include/asm/kvm_host.h
  2. 3 16
      arch/x86/kvm/x86.c

+ 0 - 1
arch/x86/include/asm/kvm_host.h

@@ -562,7 +562,6 @@ struct kvm_vcpu_arch {
 	struct {
 		u64 msr_val;
 		u64 last_steal;
-		u64 accum_steal;
 		struct gfn_to_hva_cache stime;
 		struct kvm_steal_time steal;
 	} st;

+ 3 - 16
arch/x86/kvm/x86.c

@@ -2002,22 +2002,8 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.pv_time_enabled = false;
 }
 
-static void accumulate_steal_time(struct kvm_vcpu *vcpu)
-{
-	u64 delta;
-
-	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
-		return;
-
-	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
-	vcpu->arch.st.last_steal = current->sched_info.run_delay;
-	vcpu->arch.st.accum_steal = delta;
-}
-
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
-	accumulate_steal_time(vcpu);
-
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
@@ -2025,9 +2011,10 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
 		return;
 
-	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
+	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+		vcpu->arch.st.last_steal;
+	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 	vcpu->arch.st.steal.version += 2;
-	vcpu->arch.st.accum_steal = 0;
 
 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));