@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
+static DEFINE_RWLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
+	bool new_gen;
 
-	if (!need_new_vmid_gen(kvm))
+	read_lock(&kvm_vmid_lock);
+	new_gen = need_new_vmid_gen(kvm);
+	read_unlock(&kvm_vmid_lock);
+
+	if (!new_gen)
 		return;
 
-	spin_lock(&kvm_vmid_lock);
+	write_lock(&kvm_vmid_lock);
 
 	/*
 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
 	 * use the same vmid.
 	 */
 	if (!need_new_vmid_gen(kvm)) {
-		spin_unlock(&kvm_vmid_lock);
+		write_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-	spin_unlock(&kvm_vmid_lock);
+	write_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
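Note on the locking pattern: the hunks above implement a check/recheck scheme around the VMID generation. The fast path only needs to observe the current generation, so a read lock is enough; only when a roll-over is actually needed is the write lock taken, and the condition is re-checked under it, since another vcpu may have started a new generation in the window between dropping the read lock and acquiring the write lock. Below is a minimal userspace sketch of the same pattern, using pthread rwlocks instead of the kernel primitives; the names gen, my_gen and roll_generation are illustrative stand-ins, not taken from the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

static pthread_rwlock_t gen_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint64_t gen = 1;		/* global generation counter */

struct ctx {
	uint64_t my_gen;		/* generation this context was tagged with */
};

static bool need_new_gen(struct ctx *c)
{
	/* Called with gen_lock held (read or write). */
	return c->my_gen != gen;
}

static void roll_generation(struct ctx *c)
{
	bool stale;

	/* Fast path: a read lock is enough to observe the generation. */
	pthread_rwlock_rdlock(&gen_lock);
	stale = need_new_gen(c);
	pthread_rwlock_unlock(&gen_lock);

	if (!stale)
		return;

	/*
	 * Slow path: take the write lock and re-check, because another
	 * thread may already have bumped the generation between the two
	 * lock acquisitions.
	 */
	pthread_rwlock_wrlock(&gen_lock);
	if (!need_new_gen(c)) {
		pthread_rwlock_unlock(&gen_lock);
		return;
	}

	gen++;				/* stand-in for starting a new VMID generation */
	c->my_gen = gen;

	pthread_rwlock_unlock(&gen_lock);
}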