Forráskód Böngészése

kvm: x86: Unpin and remove kvm_arch->apic_access_page

In order to make the APIC access page migratable, stop pinning it in
memory.

And because the APIC access page is not pinned in memory, we can
remove kvm_arch->apic_access_page.  When we need to write its
physical address into vmcs, we use gfn_to_page() to get its page
struct, which is needed to call page_to_phys(); the page is then
immediately unpinned.

Suggested-by: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Tang Chen 11 éve
szülő
commit
c24ae0dcd3
3 módosított fájl, 24 hozzáadás és 9 törlés
  1. 1 1
      arch/x86/include/asm/kvm_host.h
  2. 7 2
      arch/x86/kvm/vmx.c
  3. 16 6
      arch/x86/kvm/x86.c

+ 1 - 1
arch/x86/include/asm/kvm_host.h

@@ -574,7 +574,7 @@ struct kvm_arch {
 	struct kvm_apic_map *apic_map;
 
 	unsigned int tss_addr;
-	struct page *apic_access_page;
+	bool apic_access_page_done;
 
 	gpa_t wall_clock;
 

+ 7 - 2
arch/x86/kvm/vmx.c

@@ -4033,7 +4033,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	int r = 0;
 
 	mutex_lock(&kvm->slots_lock);
-	if (kvm->arch.apic_access_page)
+	if (kvm->arch.apic_access_page_done)
 		goto out;
 	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
 	kvm_userspace_mem.flags = 0;
@@ -4049,7 +4049,12 @@ static int alloc_apic_access_page(struct kvm *kvm)
 		goto out;
 	}
 
-	kvm->arch.apic_access_page = page;
+	/*
+	 * Do not pin the page in memory, so that memory hot-unplug
+	 * is able to migrate it.
+	 */
+	put_page(page);
+	kvm->arch.apic_access_page_done = true;
 out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;

+ 16 - 6
arch/x86/kvm/x86.c

@@ -6028,19 +6028,31 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
 
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
+	struct page *page = NULL;
+
 	if (!kvm_x86_ops->set_apic_access_page_addr)
 		return;
 
-	vcpu->kvm->arch.apic_access_page = gfn_to_page(vcpu->kvm,
-			APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
-	kvm_x86_ops->set_apic_access_page_addr(vcpu,
-			page_to_phys(vcpu->kvm->arch.apic_access_page));
+	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
+
+	/*
+	 * Do not pin apic access page in memory, the MMU notifier
+	 * will call us again if it is migrated or swapped out.
+	 */
+	put_page(page);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
 
 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 					   unsigned long address)
 {
+	/*
+	 * The physical address of apic access page is stored in the VMCS.
+	 * Update it when it becomes invalid.
+	 */
+	if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
+		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
 }
 
 /*
@@ -7297,8 +7309,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);
 	kvm_free_vcpus(kvm);
-	if (kvm->arch.apic_access_page)
-		put_page(kvm->arch.apic_access_page);
 	kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 }
 }