@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret = 0;
+	int ret, cpu;
 
 	if (type)
 		return -EINVAL;
 
+	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+	if (!kvm->arch.last_vcpu_ran)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
 	ret = kvm_alloc_stage2_pgd(kvm);
 	if (ret)
 		goto out_fail_alloc;
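The hunk above relies on last_vcpu_ran existing in the arch-specific VM state. A minimal sketch of that declaration, assuming it lives in the arm/arm64 kvm_host.h as part of the same series; the surrounding fields and exact placement are assumptions, only the field name and type come from this patch:

/*
 * Sketch (assumption): per-CPU tracking field in the arch VM state.
 * Only the new field is shown.
 */
#include <linux/percpu.h>

struct kvm_arch {
	/* ... existing fields (VMID, stage-2 page table pointer, ...) ... */

	/* vcpu_id of the vCPU that last ran on each physical CPU, -1 if none */
	int __percpu *last_vcpu_ran;
};

alloc_percpu(typeof(*kvm->arch.last_vcpu_ran)) then sizes one per-CPU slot from that declaration, and the for_each_possible_cpu() loop seeds every slot with the -1 sentinel.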
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(kvm);
 out_fail_alloc:
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
 	return ret;
 }
 
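Both error labels now funnel through the per-CPU cleanup; free_percpu() treats a NULL pointer as a no-op, so clearing last_vcpu_ran after freeing keeps any later teardown path harmless. A self-contained, hypothetical illustration of the alloc_percpu()/per_cpu_ptr()/free_percpu() pairing used in these two hunks (the module and symbol names percpu_demo_* and last_id are invented for the example):

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static int __percpu *last_id;

static int __init percpu_demo_init(void)
{
	int cpu;

	/* One int per possible CPU */
	last_id = alloc_percpu(int);
	if (!last_id)
		return -ENOMEM;

	/* -1 is the "nothing recorded yet" sentinel, as in the patch */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(last_id, cpu) = -1;

	return 0;
}

static void __exit percpu_demo_exit(void)
{
	free_percpu(last_id);
	last_id = NULL;
}

module_init(percpu_demo_init);
module_exit(percpu_demo_exit);
MODULE_LICENSE("GPL");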
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int *last_ran;
+
+	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+	/*
+	 * We might get preempted before the vCPU actually runs, but
+	 * over-invalidation doesn't affect correctness.
+	 */
+	if (*last_ran != vcpu->vcpu_id) {
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		*last_ran = vcpu->vcpu_id;
+	}
+
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
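The hyp call names __kvm_tlb_flush_local_vmid, whose body is not part of this hunk. A sketch of what such a handler plausibly does on the arm64 side, written as an assumption for illustration rather than taken from this patch: switch VTTBR to the VM's VMID, issue a non-broadcast TLBI on the local CPU, and restore VTTBR.

/*
 * Illustrative sketch only (assumption, not this hunk): a local,
 * per-VMID stage-1 TLB invalidation performed at EL2 for the vCPU's VM.
 */
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

	/* Switch to the guest's VMID so the invalidation is scoped to it */
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	isb();

	/* No "is" suffix: the invalidation is not broadcast to other CPUs */
	asm volatile("tlbi vmalle1");
	dsb(nsh);
	isb();

	write_sysreg(0, vttbr_el2);
}

Because tlbi vmalle1 is not broadcast and dsb(nsh) only waits for completion on the local CPU, other CPUs keep their TLB entries, which is exactly what the last_vcpu_ran check is designed to allow.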