@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_RWLOCK(kvm_vmid_lock);
+static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -482,7 +482,9 @@ void force_vm_exit(const cpumask_t *mask)
  */
 static bool need_new_vmid_gen(struct kvm *kvm)
 {
-	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
+	return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
 }
 
 /**
@@ -497,16 +499,11 @@ static void update_vttbr(struct kvm *kvm)
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
-	bool new_gen;
 
-	read_lock(&kvm_vmid_lock);
-	new_gen = need_new_vmid_gen(kvm);
-	read_unlock(&kvm_vmid_lock);
-
-	if (!new_gen)
+	if (!need_new_vmid_gen(kvm))
 		return;
 
-	write_lock(&kvm_vmid_lock);
+	spin_lock(&kvm_vmid_lock);
 
 	/*
 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -514,7 +511,7 @@ static void update_vttbr(struct kvm *kvm)
 	 * use the same vmid.
 	 */
 	if (!need_new_vmid_gen(kvm)) {
-		write_unlock(&kvm_vmid_lock);
+		spin_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -537,7 +534,6 @@ static void update_vttbr(struct kvm *kvm)
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}
 
-	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
 	kvm->arch.vmid = kvm_next_vmid;
 	kvm_next_vmid++;
 	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
@@ -548,7 +544,10 @@ static void update_vttbr(struct kvm *kvm)
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-	write_unlock(&kvm_vmid_lock);
+	smp_wmb();
+	WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+
+	spin_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
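
For readers following the barrier pairing above, here is a minimal userspace C11 sketch of the same publish/consume pattern. It is illustrative only, not kernel code: global_gen, struct vm, publish() and need_new_gen() are hypothetical names, atomic_thread_fence() stands in for smp_wmb()/smp_rmb(), and relaxed atomic accesses stand in for WRITE_ONCE()/READ_ONCE(). The writer makes the new VMID visible before it stores the matching generation; the reader orders its generation reads before any later read of the VMID, so a reader that observes a current generation is guaranteed to also observe the VMID that goes with it.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Plays the role of kvm_vmid_gen. */
static atomic_uint_least64_t global_gen = 1;

struct vm {
	uint64_t vmid;                  /* payload, as kvm->arch.vmid */
	atomic_uint_least64_t vmid_gen; /* as kvm->arch.vmid_gen */
};

/* Reader side, mirroring need_new_vmid_gen(). */
static int need_new_gen(struct vm *vm)
{
	uint64_t current_gen =
		atomic_load_explicit(&global_gen, memory_order_relaxed);

	/*
	 * Stands in for smp_rmb(): orders the generation reads against
	 * any later read of vm->vmid by the caller.
	 */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&vm->vmid_gen,
				    memory_order_relaxed) != current_gen;
}

/*
 * Writer side, mirroring the tail of update_vttbr(); the real code
 * additionally holds kvm_vmid_lock here to serialise writers.
 */
static void publish(struct vm *vm, uint64_t new_vmid)
{
	vm->vmid = new_vmid;	/* make the payload visible first */

	/*
	 * Stands in for smp_wmb(): the payload store above must be
	 * visible before the generation store below.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&vm->vmid_gen,
			      atomic_load_explicit(&global_gen,
						   memory_order_relaxed),
			      memory_order_relaxed);
}

int main(void)
{
	struct vm vm = { .vmid = 0, .vmid_gen = 0 };

	publish(&vm, 42);
	if (!need_new_gen(&vm))
		printf("generation current, safe to use vmid %llu\n",
		       (unsigned long long)vm.vmid);
	return 0;
}

The point of moving the kvm->arch.vmid_gen update to after the vttbr computation, behind smp_wmb(), is exactly this pairing: a vcpu that observes an up-to-date generation in need_new_vmid_gen() can no longer race ahead and enter the guest with a stale VMID/VTTBR, while the fast path stays lockless and the spinlock only serialises the slow (rollover) path.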