@@ -153,7 +153,7 @@ static void ack_flush(void *_completed)
 {
 }
 
-static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 {
 	int i, cpu, me;
 	cpumask_var_t cpus;
@@ -190,7 +190,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	long dirty_count = kvm->tlbs_dirty;
 
 	smp_mb();
-	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
@@ -198,17 +198,17 @@ EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
 {
-	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
 void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 {
-	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
 void kvm_make_scan_ioapic_request(struct kvm *kvm)
 {
-	make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)