@@ -496,7 +496,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
 
-int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 			       void *data, int offset, int len, u32 access)
 {
 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
@@ -647,7 +647,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 	}
 }
 
-int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
 	u64 xcr0 = xcr;
 	u64 old_xcr0 = vcpu->arch.xcr0;
@@ -1193,7 +1193,7 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 #endif
 
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
-unsigned long max_tsc_khz;
+static unsigned long max_tsc_khz;
 
 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 {
@@ -1247,7 +1247,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 	return tsc;
 }
 
-void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
 	bool vcpus_matched;