浏览代码

Revert "KVM: Simplify kvm->tlbs_dirty handling"

This reverts commit 5befdc385ddb2d5ae8995ad89004529a3acf58fc.

Since we will allow flush tlb out of mmu-lock in the later
patch

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Xiao Guangrong 11 年之前
父节点
当前提交
a086f6a1eb
共有 3 个文件被更改,包括 8 次插入8 次删除
  1. 3 4
      arch/x86/kvm/paging_tmpl.h
  2. 1 3
      include/linux/kvm_host.h
  3. 4 1
      virt/kvm/kvm_main.c

+ 3 - 4
arch/x86/kvm/paging_tmpl.h

@@ -913,8 +913,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  *   and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
  *   used by guest then tlbs are not flushed, so guest is allowed to access the
  *   freed pages.
- *   We set tlbs_dirty to let the notifier know this change and delay the flush
- *   until such a case actually happens.
+ *   And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -943,7 +942,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			return -EINVAL;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			vcpu->kvm->tlbs_dirty = true;
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
@@ -958,7 +957,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		if (gfn != sp->gfns[i]) {
 			drop_spte(vcpu->kvm, &sp->spt[i]);
-			vcpu->kvm->tlbs_dirty = true;
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 

+ 1 - 3
include/linux/kvm_host.h

@@ -411,9 +411,7 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
-	/* Protected by mmu_lock */
-	bool tlbs_dirty;
-
+	long tlbs_dirty;
 	struct list_head devices;
 };
 

+ 4 - 1
virt/kvm/kvm_main.c

@@ -186,9 +186,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
+	long dirty_count = kvm->tlbs_dirty;
+
+	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
-	kvm->tlbs_dirty = false;
+	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);