@@ -4405,7 +4405,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 {
 	unsigned long flags;
 	struct amd_iommu *iommu;
-	struct irq_remap_table *irt;
+	struct irq_remap_table *table;
 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
 	int devid = ir_data->irq_2_irte.devid;
 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
@@ -4419,11 +4419,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 	if (!iommu)
 		return -ENODEV;
 
-	irt = get_irq_table(devid);
-	if (!irt)
+	table = get_irq_table(devid);
+	if (!table)
 		return -ENODEV;
 
-	raw_spin_lock_irqsave(&irt->lock, flags);
+	raw_spin_lock_irqsave(&table->lock, flags);
 
 	if (ref->lo.fields_vapic.guest_mode) {
 		if (cpu >= 0)
@@ -4432,7 +4432,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 		barrier();
 	}
 
-	raw_spin_unlock_irqrestore(&irt->lock, flags);
+	raw_spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
 	iommu_completion_wait(iommu);