@@ -191,9 +191,23 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	long dirty_count = kvm->tlbs_dirty;
+	/*
+	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
+	 * kvm_make_all_cpus_request.
+	 */
+	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
 
-	smp_mb();
+	/*
+	 * We want to publish modifications to the page tables before reading
+	 * mode. Pairs with a memory barrier in arch-specific code.
+	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
+	 * and smp_mb in walk_shadow_page_lockless_begin/end.
+	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
+	 *
+	 * There is already an smp_mb__after_atomic() before
+	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
+	 * barrier here.
+	 */
 	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
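
For readers outside the kernel tree, the ordering protocol the hunk relies on can be modeled standalone. Below is a minimal C11 <stdatomic.h> sketch of the tlbs_dirty handshake: memory_order_acquire stands in for smp_load_acquire(), a release fetch-add stands in for the writer-side barrier plus tlbs_dirty increment (the writer is not shown in this hunk, so its shape here is an assumption), and the writer_sync_page()/flush_remote_tlbs_model() names are illustrative, not kernel symbols.

/*
 * Standalone C11 model of the tlbs_dirty protocol above. Kernel
 * primitives are replaced with their stdatomic analogues; function
 * names are hypothetical and do not exist in the kernel.
 */
#include <stdatomic.h>

static atomic_long tlbs_dirty;

/*
 * Writer side (models a vCPU deferring a TLB flush): the release
 * increment publishes the page-table update before the new count,
 * standing in for the kernel's write barrier before tlbs_dirty++.
 */
static void writer_sync_page(void)
{
	/* ... update a shadow page-table entry here ... */
	atomic_fetch_add_explicit(&tlbs_dirty, 1, memory_order_release);
}

/*
 * Flusher side (models kvm_flush_remote_tlbs): the acquire load pairs
 * with the release above, so every update published before the
 * increment is visible here before the flush request is issued.
 */
static void flush_remote_tlbs_model(void)
{
	long dirty_count = atomic_load_explicit(&tlbs_dirty,
						memory_order_acquire);
	long expected = dirty_count;

	/* ... send remote flush requests (kvm_make_all_cpus_request) ... */

	/*
	 * Clear the counter only if it still matches the value the flush
	 * covered; a concurrent increment makes this CAS fail, mirroring
	 * the cmpxchg in the patch.
	 */
	atomic_compare_exchange_strong_explicit(&tlbs_dirty, &expected, 0,
						memory_order_relaxed,
						memory_order_relaxed);
}

The failing CAS is the point of the design: if a writer dirties the TLB between the acquire load and the clear, tlbs_dirty no longer equals dirty_count, the exchange is skipped, and the pending count survives for the next flush.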