@@ -34,6 +34,8 @@
 #include <linux/sched.h>
 #include <linux/trace_events.h>
 #include <linux/slab.h>
+#include <linux/amd-iommu.h>
+#include <linux/hashtable.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -945,6 +947,55 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
 	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
+/* Note:
+ * This hash table is used to map VM_ID to a struct kvm_arch,
+ * when handling AMD IOMMU GALOG notification to schedule in
+ * a particular vCPU.
+ */
+#define SVM_VM_DATA_HASH_BITS	8
+DECLARE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
+static spinlock_t svm_vm_data_hash_lock;
+
+/* Note:
+ * This function is called from IOMMU driver to notify
+ * SVM to schedule in a particular vCPU of a particular VM.
+ */
+static int avic_ga_log_notifier(u32 ga_tag)
+{
+	unsigned long flags;
+	struct kvm_arch *ka = NULL;
+	struct kvm_vcpu *vcpu = NULL;
+	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
+	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
+
+	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
+
+	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
+		struct kvm *kvm = container_of(ka, struct kvm, arch);
+		struct kvm_arch *vm_data = &kvm->arch;
+
+		if (vm_data->avic_vm_id != vm_id)
+			continue;
+		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
+		break;
+	}
+	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+
+	if (!vcpu)
+		return 0;
+
+	/* Note:
+	 * At this point, the IOMMU should have already set the pending
+	 * bit in the vAPIC backing page. So, we just need to schedule
+	 * in the vcpu.
+	 */
+	if (vcpu->mode == OUTSIDE_GUEST_MODE)
+		kvm_vcpu_wake_up(vcpu);
+
+	return 0;
+}
+
 static __init int svm_hardware_setup(void)
 {
 	int cpu;
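The notifier above unpacks the IOMMU's 32-bit GA tag into a VM ID and a vCPU ID with the AVIC_GATAG_TO_* macros, which are defined elsewhere in this series and not shown in this hunk. A minimal sketch of the packing they imply, assuming the vCPU ID occupies the low 8 bits (the field widths here are an assumption for illustration, not quoted from the patch):

#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1U << AVIC_VCPU_ID_BITS) - 1)

/* Hypothetical packing: VM ID in the high bits, vCPU ID in the low bits. */
#define AVIC_GATAG(vm_id, vcpu_id) \
	(((vm_id) << AVIC_VCPU_ID_BITS) | ((vcpu_id) & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(tag)		((tag) >> AVIC_VCPU_ID_BITS)
#define AVIC_GATAG_TO_VCPUID(tag)	((tag) & AVIC_VCPU_ID_MASK)

With a layout like this, AVIC_GATAG_TO_VMID(AVIC_GATAG(x, y)) == x whenever the vCPU ID fits in its field, which is the round-trip property avic_ga_log_notifier() relies on.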
@@ -1003,10 +1054,15 @@ static __init int svm_hardware_setup(void)
 	if (avic) {
 		if (!npt_enabled ||
 		    !boot_cpu_has(X86_FEATURE_AVIC) ||
-		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
 			avic = false;
-		else
+		} else {
 			pr_info("AVIC enabled\n");
+
+			hash_init(svm_vm_data_hash);
+			spin_lock_init(&svm_vm_data_hash_lock);
+			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
+		}
 	}
 
 	return 0;
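Initialization order matters in the hunk above: the hash table and its lock are set up before the notifier is registered, since the IOMMU driver may invoke the callback as soon as registration completes. The callback contract is the one avic_ga_log_notifier() implements, an int-returning function taking the 32-bit GA tag; the export this series relies on from include/linux/amd-iommu.h is expected to look like the following (a sketch, not quoted from the header):

extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));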
@@ -1327,6 +1383,7 @@ static inline int avic_free_vm_id(int id)
 
 static void avic_vm_destroy(struct kvm *kvm)
 {
+	unsigned long flags;
 	struct kvm_arch *vm_data = &kvm->arch;
 
 	avic_free_vm_id(vm_data->avic_vm_id);
@@ -1335,10 +1392,15 @@ static void avic_vm_destroy(struct kvm *kvm)
 		__free_page(vm_data->avic_logical_id_table_page);
 	if (vm_data->avic_physical_id_table_page)
 		__free_page(vm_data->avic_physical_id_table_page);
+
+	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+	hash_del(&vm_data->hnode);
+	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
 }
 
 static int avic_vm_init(struct kvm *kvm)
 {
+	unsigned long flags;
 	int err = -ENOMEM;
 	struct kvm_arch *vm_data = &kvm->arch;
 	struct page *p_page;
@@ -1367,6 +1429,10 @@ static int avic_vm_init(struct kvm *kvm)
 	vm_data->avic_logical_id_table_page = l_page;
 	clear_page(page_address(l_page));
 
+	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+	hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
+	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+
 	return 0;
 
 free_avic:
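Taken together, the lookup, init, and destroy hunks follow the standard <linux/hashtable.h> open-chaining pattern: embed a struct hlist_node in the object, hash_add() it under a lock at creation, walk the candidate bucket with hash_for_each_possible() on lookup, and hash_del() it at teardown. A self-contained sketch of that pattern (all names here are illustrative, not from the patch):

#include <linux/hashtable.h>
#include <linux/spinlock.h>

struct example_entry {
	u32 id;
	struct hlist_node hnode;	/* links the entry into its bucket */
};

static DEFINE_HASHTABLE(example_hash, 8);	/* 2^8 buckets */
static DEFINE_SPINLOCK(example_lock);

static void example_insert(struct example_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	hash_add(example_hash, &e->hnode, e->id);
	spin_unlock_irqrestore(&example_lock, flags);
}

static struct example_entry *example_find(u32 id)
{
	unsigned long flags;
	struct example_entry *e, *found = NULL;

	spin_lock_irqsave(&example_lock, flags);
	/*
	 * hash_for_each_possible() only narrows the search to one bucket;
	 * colliding keys share it, so the id is re-checked on every hit.
	 * This is why avic_ga_log_notifier() compares avic_vm_id itself.
	 */
	hash_for_each_possible(example_hash, e, hnode, id) {
		if (e->id == id) {
			found = e;
			break;
		}
	}
	spin_unlock_irqrestore(&example_lock, flags);
	return found;
}

static void example_remove(struct example_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	hash_del(&e->hnode);	/* other entries in the bucket are untouched */
	spin_unlock_irqrestore(&example_lock, flags);
}

The patch uses DECLARE_HASHTABLE() plus explicit hash_init() and spin_lock_init() calls in svm_hardware_setup() rather than the DEFINE_* forms shown here; the two are equivalent once initialized.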