@@ -4269,4 +4269,43 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 
 	return 0;
 }
+
+int amd_iommu_update_ga(int cpu, bool is_run, void *data)
+{
+	unsigned long flags;
+	struct amd_iommu *iommu;
+	struct irq_remap_table *irt;
+	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+	int devid = ir_data->irq_2_irte.devid;
+	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
+
+	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+		return 0;
+
+	iommu = amd_iommu_rlookup_table[devid];
+	if (!iommu)
+		return -ENODEV;
+
+	irt = get_irq_table(devid, false);
+	if (!irt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&irt->lock, flags);
+
+	if (ref->lo.fields_vapic.guest_mode) {
+		if (cpu >= 0)
+			ref->lo.fields_vapic.destination = cpu;
+		ref->lo.fields_vapic.is_run = is_run;
+		barrier();
+	}
+
+	spin_unlock_irqrestore(&irt->lock, flags);
+
+	iommu_flush_irt(iommu, devid);
+	iommu_completion_wait(iommu);
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
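
For reference, the exported helper is meant to be driven from the hypervisor side whenever a vCPU changes physical CPU or running state. Below is a minimal caller sketch, not part of this patch: struct example_ir_entry, struct example_vcpu and example_vcpu_sched_change() are made-up names for illustration, and it assumes the amd_iommu_update_ga() prototype is made visible through <linux/amd-iommu.h> elsewhere in the series. Only amd_iommu_update_ga() itself comes from the hunk above.

#include <linux/list.h>
#include <linux/amd-iommu.h>

/* Hypothetical per-vCPU bookkeeping kept by the caller (illustration only). */
struct example_ir_entry {
	struct list_head node;
	void *ir_data;		/* amd_ir_data cookie for one posted interrupt */
};

struct example_vcpu {
	struct list_head ir_list;	/* example_ir_entry list for this vCPU */
};

/*
 * Sketch: push the vCPU's new physical CPU and running state into every
 * IRTE that posts interrupts to it.  Passing -1 while the vCPU is
 * descheduled leaves the destination untouched and only clears is_run,
 * matching the "if (cpu >= 0)" check in amd_iommu_update_ga() above.
 */
static void example_vcpu_sched_change(struct example_vcpu *vcpu, int cpu,
				      bool is_run)
{
	struct example_ir_entry *e;

	list_for_each_entry(e, &vcpu->ir_list, node)
		amd_iommu_update_ga(is_run ? cpu : -1, is_run, e->ir_data);
}

A real caller would also want to handle the -ENODEV return, which signals that the devid no longer resolves to an IOMMU or an interrupt-remapping table.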