@@ -137,6 +137,7 @@ struct iommu_dev_data {
 	bool pri_tlp;			  /* PASID TLB required for
 					     PPR completions */
 	u32 errata;			  /* Bitmap for errata to apply */
+	bool use_vapic;			  /* Enable device to use vapic mode */
 };
 
 /*
@@ -707,14 +708,74 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 	}
 }
 
+#ifdef CONFIG_IRQ_REMAP
+static int (*iommu_ga_log_notifier)(u32);
+
+int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
+{
+	iommu_ga_log_notifier = notifier;
+
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
+
+static void iommu_poll_ga_log(struct amd_iommu *iommu)
+{
+	u32 head, tail, cnt = 0;
+
+	if (iommu->ga_log == NULL)
+		return;
+
+	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
+	while (head != tail) {
+		volatile u64 *raw;
+		u64 log_entry;
+
+		raw = (u64 *)(iommu->ga_log + head);
+		cnt++;
+
+		/* Avoid memcpy function-call overhead */
+		log_entry = *raw;
+
+		/* Update head pointer of hardware ring-buffer */
+		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
+		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+
+		/* Handle GA entry */
+		switch (GA_REQ_TYPE(log_entry)) {
+		case GA_GUEST_NR:
+			if (!iommu_ga_log_notifier)
+				break;
+
+			pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
+				 __func__, GA_DEVID(log_entry),
+				 GA_TAG(log_entry));
+
+			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
+				pr_err("AMD-Vi: GA log notifier failed.\n");
+			break;
+		default:
+			break;
+		}
+	}
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+#define AMD_IOMMU_INT_MASK	\
+	(MMIO_STATUS_EVT_INT_MASK | \
+	 MMIO_STATUS_PPR_INT_MASK | \
+	 MMIO_STATUS_GALOG_INT_MASK)
+
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
 {
 	struct amd_iommu *iommu = (struct amd_iommu *) data;
 	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
-		/* Enable EVT and PPR interrupts again */
-		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+	while (status & AMD_IOMMU_INT_MASK) {
+		/* Enable EVT and PPR and GA interrupts again */
+		writel(AMD_IOMMU_INT_MASK,
 			iommu->mmio_base + MMIO_STATUS_OFFSET);
 
 		if (status & MMIO_STATUS_EVT_INT_MASK) {
@@ -727,6 +788,13 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
 			iommu_poll_ppr_log(iommu);
 		}
 
+#ifdef CONFIG_IRQ_REMAP
+		if (status & MMIO_STATUS_GALOG_INT_MASK) {
+			pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
+			iommu_poll_ga_log(iommu);
+		}
+#endif
+
 		/*
 		 * Hardware bug: ERBT1312
 		 * When re-enabling interrupt (by writing 1
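The hunks above only cover the producer side: the interrupt thread drains the GA log and forwards each GA_GUEST_NR tag to whatever handler was registered. A minimal consumer-side sketch, assuming the prototype for amd_iommu_register_ga_log_notifier() is exposed through include/linux/amd-iommu.h elsewhere in this series; the handler name and its wake-up logic are purely illustrative and not part of this patch:

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/amd-iommu.h>

	/* Called from the IOMMU interrupt thread for every GA log entry. */
	static int example_ga_log_handler(u32 ga_tag)
	{
		/*
		 * The hypervisor encoded this tag when it programmed the IRTE
		 * (typically a VM id plus vCPU id); decode it and wake the vCPU.
		 */
		pr_debug("GA log event, ga_tag=%#x\n", ga_tag);
		return 0;	/* non-zero is reported as a notifier failure */
	}

	static int __init example_init(void)
	{
		return amd_iommu_register_ga_log_notifier(example_ga_log_handler);
	}
	module_init(example_init);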
@@ -2948,6 +3016,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 	if (!iommu)
 		return;
 
+#ifdef CONFIG_IRQ_REMAP
+	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+	    (dom->type == IOMMU_DOMAIN_UNMANAGED))
+		dev_data->use_vapic = 0;
+#endif
+
 	iommu_completion_wait(iommu);
 }
 
@@ -2973,6 +3047,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 
 	ret = attach_device(dev, domain);
 
+#ifdef CONFIG_IRQ_REMAP
+	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+		if (dom->type == IOMMU_DOMAIN_UNMANAGED)
+			dev_data->use_vapic = 1;
+		else
+			dev_data->use_vapic = 0;
+	}
+#endif
+
 	iommu_completion_wait(iommu);
 
 	return ret;
@@ -3511,34 +3594,6 @@ EXPORT_SYMBOL(amd_iommu_device_info);
  *
  *****************************************************************************/
 
-union irte {
-	u32 val;
-	struct {
-		u32 valid	: 1,
-		    no_fault	: 1,
-		    int_type	: 3,
-		    rq_eoi	: 1,
-		    dm		: 1,
-		    rsvd_1	: 1,
-		    destination	: 8,
-		    vector	: 8,
-		    rsvd_2	: 8;
-	} fields;
-};
-
-struct irq_2_irte {
-	u16 devid; /* Device ID for IRTE table */
-	u16 index; /* Index into IRTE table*/
-};
-
-struct amd_ir_data {
-	struct irq_2_irte irq_2_irte;
-	union irte irte_entry;
-	union {
-		struct msi_msg msi_entry;
-	};
-};
-
 static struct irq_chip amd_ir_chip;
 
 #define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
@@ -3560,8 +3615,6 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
 	amd_iommu_dev_table[devid].data[2] = dte;
 }
 
-#define IRTE_ALLOCATED (~1U)
-
 static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 {
 	struct irq_remap_table *table = NULL;
@@ -3607,13 +3660,18 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 		goto out;
 	}
 
-	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
 
 	if (ioapic) {
 		int i;
 
 		for (i = 0; i < 32; ++i)
-			table->table[i] = IRTE_ALLOCATED;
+			iommu->irte_ops->set_allocated(table, i);
 	}
 
 	irq_lookup_table[devid] = table;
@@ -3639,6 +3697,10 @@ static int alloc_irq_index(u16 devid, int count)
 	struct irq_remap_table *table;
 	unsigned long flags;
 	int index, c;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+	if (!iommu)
+		return -ENODEV;
 
 	table = get_irq_table(devid, false);
 	if (!table)
@@ -3650,14 +3712,14 @@ static int alloc_irq_index(u16 devid, int count)
 	for (c = 0, index = table->min_index;
 	     index < MAX_IRQS_PER_TABLE;
 	     ++index) {
-		if (table->table[index] == 0)
+		if (!iommu->irte_ops->is_allocated(table, index))
 			c += 1;
 		else
 			c = 0;
 
 		if (c == count) {
 			for (; c != 0; --c)
-				table->table[index - c + 1] = IRTE_ALLOCATED;
+				iommu->irte_ops->set_allocated(table, index - c + 1);
 
 			index -= count - 1;
 			goto out;
@@ -3672,7 +3734,42 @@ out:
 	return index;
 }
 
-static int modify_irte(u16 devid, int index, union irte irte)
+static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
+			  struct amd_ir_data *data)
+{
+	struct irq_remap_table *table;
+	struct amd_iommu *iommu;
+	unsigned long flags;
+	struct irte_ga *entry;
+
+	iommu = amd_iommu_rlookup_table[devid];
+	if (iommu == NULL)
+		return -EINVAL;
+
+	table = get_irq_table(devid, false);
+	if (!table)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&table->lock, flags);
+
+	entry = (struct irte_ga *)table->table;
+	entry = &entry[index];
+	entry->lo.fields_remap.valid = 0;
+	entry->hi.val = irte->hi.val;
+	entry->lo.val = irte->lo.val;
+	entry->lo.fields_remap.valid = 1;
+	if (data)
+		data->ref = entry;
+
+	spin_unlock_irqrestore(&table->lock, flags);
+
+	iommu_flush_irt(iommu, devid);
+	iommu_completion_wait(iommu);
+
+	return 0;
+}
+
+static int modify_irte(u16 devid, int index, union irte *irte)
 {
 	struct irq_remap_table *table;
 	struct amd_iommu *iommu;
@@ -3687,7 +3784,7 @@ static int modify_irte(u16 devid, int index, union irte irte)
 		return -ENOMEM;
 
 	spin_lock_irqsave(&table->lock, flags);
-	table->table[index] = irte.val;
+	table->table[index] = irte->val;
 	spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
@@ -3711,13 +3808,146 @@ static void free_irte(u16 devid, int index)
 		return;
 
 	spin_lock_irqsave(&table->lock, flags);
-	table->table[index] = 0;
+	iommu->irte_ops->clear_allocated(table, index);
 	spin_unlock_irqrestore(&table->lock, flags);
 
 	iommu_flush_irt(iommu, devid);
 	iommu_completion_wait(iommu);
 }
 
+static void irte_prepare(void *entry,
+			 u32 delivery_mode, u32 dest_mode,
+			 u8 vector, u32 dest_apicid, int devid)
+{
+	union irte *irte = (union irte *) entry;
+
+	irte->val = 0;
+	irte->fields.vector = vector;
+	irte->fields.int_type = delivery_mode;
+	irte->fields.destination = dest_apicid;
+	irte->fields.dm = dest_mode;
+	irte->fields.valid = 1;
+}
+
+static void irte_ga_prepare(void *entry,
+			    u32 delivery_mode, u32 dest_mode,
+			    u8 vector, u32 dest_apicid, int devid)
+{
+	struct irte_ga *irte = (struct irte_ga *) entry;
+	struct iommu_dev_data *dev_data = search_dev_data(devid);
+
+	irte->lo.val = 0;
+	irte->hi.val = 0;
+	irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
+	irte->lo.fields_remap.int_type = delivery_mode;
+	irte->lo.fields_remap.dm = dest_mode;
+	irte->hi.fields.vector = vector;
+	irte->lo.fields_remap.destination = dest_apicid;
+	irte->lo.fields_remap.valid = 1;
+}
+
+static void irte_activate(void *entry, u16 devid, u16 index)
+{
+	union irte *irte = (union irte *) entry;
+
+	irte->fields.valid = 1;
+	modify_irte(devid, index, irte);
+}
+
+static void irte_ga_activate(void *entry, u16 devid, u16 index)
+{
+	struct irte_ga *irte = (struct irte_ga *) entry;
+
+	irte->lo.fields_remap.valid = 1;
+	modify_irte_ga(devid, index, irte, NULL);
+}
+
+static void irte_deactivate(void *entry, u16 devid, u16 index)
+{
+	union irte *irte = (union irte *) entry;
+
+	irte->fields.valid = 0;
+	modify_irte(devid, index, irte);
+}
+
+static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+{
+	struct irte_ga *irte = (struct irte_ga *) entry;
+
+	irte->lo.fields_remap.valid = 0;
+	modify_irte_ga(devid, index, irte, NULL);
+}
+
+static void irte_set_affinity(void *entry, u16 devid, u16 index,
+			      u8 vector, u32 dest_apicid)
+{
+	union irte *irte = (union irte *) entry;
+
+	irte->fields.vector = vector;
+	irte->fields.destination = dest_apicid;
+	modify_irte(devid, index, irte);
+}
+
+static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+				 u8 vector, u32 dest_apicid)
+{
+	struct irte_ga *irte = (struct irte_ga *) entry;
+	struct iommu_dev_data *dev_data = search_dev_data(devid);
+
+	if (!dev_data || !dev_data->use_vapic) {
+		irte->hi.fields.vector = vector;
+		irte->lo.fields_remap.destination = dest_apicid;
+		irte->lo.fields_remap.guest_mode = 0;
+		modify_irte_ga(devid, index, irte, NULL);
+	}
+}
+
+#define IRTE_ALLOCATED (~1U)
+static void irte_set_allocated(struct irq_remap_table *table, int index)
+{
+	table->table[index] = IRTE_ALLOCATED;
+}
+
+static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
+{
+	struct irte_ga *ptr = (struct irte_ga *)table->table;
+	struct irte_ga *irte = &ptr[index];
+
+	memset(&irte->lo.val, 0, sizeof(u64));
+	memset(&irte->hi.val, 0, sizeof(u64));
+	irte->hi.fields.vector = 0xff;
+}
+
+static bool irte_is_allocated(struct irq_remap_table *table, int index)
+{
+	union irte *ptr = (union irte *)table->table;
+	union irte *irte = &ptr[index];
+
+	return irte->val != 0;
+}
+
+static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
+{
+	struct irte_ga *ptr = (struct irte_ga *)table->table;
+	struct irte_ga *irte = &ptr[index];
+
+	return irte->hi.fields.vector != 0;
+}
+
+static void irte_clear_allocated(struct irq_remap_table *table, int index)
+{
+	table->table[index] = 0;
+}
+
+static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
+{
+	struct irte_ga *ptr = (struct irte_ga *)table->table;
+	struct irte_ga *irte = &ptr[index];
+
+	memset(&irte->lo.val, 0, sizeof(u64));
+	memset(&irte->hi.val, 0, sizeof(u64));
+}
+
 static int get_devid(struct irq_alloc_info *info)
 {
 	int devid = -1;
@@ -3802,19 +4032,17 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
 {
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
 	struct msi_msg *msg = &data->msi_entry;
-	union irte *irte = &data->irte_entry;
 	struct IO_APIC_route_entry *entry;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+	if (!iommu)
+		return;
 
 	data->irq_2_irte.devid = devid;
 	data->irq_2_irte.index = index + sub_handle;
-
-	/* Setup IRTE for IOMMU */
-	irte->val = 0;
-	irte->fields.vector = irq_cfg->vector;
-	irte->fields.int_type = apic->irq_delivery_mode;
-	irte->fields.destination = irq_cfg->dest_apicid;
-	irte->fields.dm = apic->irq_dest_mode;
-	irte->fields.valid = 1;
+	iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
+				 apic->irq_dest_mode, irq_cfg->vector,
+				 irq_cfg->dest_apicid, devid);
 
 	switch (info->type) {
 	case X86_IRQ_ALLOC_TYPE_IOAPIC:
@@ -3845,12 +4073,32 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
 	}
 }
 
+struct amd_irte_ops irte_32_ops = {
+	.prepare = irte_prepare,
+	.activate = irte_activate,
+	.deactivate = irte_deactivate,
+	.set_affinity = irte_set_affinity,
+	.set_allocated = irte_set_allocated,
+	.is_allocated = irte_is_allocated,
+	.clear_allocated = irte_clear_allocated,
+};
+
+struct amd_irte_ops irte_128_ops = {
+	.prepare = irte_ga_prepare,
+	.activate = irte_ga_activate,
+	.deactivate = irte_ga_deactivate,
+	.set_affinity = irte_ga_set_affinity,
+	.set_allocated = irte_ga_set_allocated,
+	.is_allocated = irte_ga_is_allocated,
+	.clear_allocated = irte_ga_clear_allocated,
+};
+
 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 			       unsigned int nr_irqs, void *arg)
 {
 	struct irq_alloc_info *info = arg;
 	struct irq_data *irq_data;
-	struct amd_ir_data *data;
+	struct amd_ir_data *data = NULL;
 	struct irq_cfg *cfg;
 	int i, ret, devid;
 	int index = -1;
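The two ops tables just above are instances of struct amd_irte_ops, which this hunk does not define (it is expected to live in the driver's private header, amd_iommu_types.h, elsewhere in this series). As a reading aid, a sketch of the function-pointer layout implied by the handlers defined earlier and their call sites; member order and any members beyond these seven are assumptions:

	struct amd_irte_ops {
		void (*prepare)(void *entry, u32 delivery_mode, u32 dest_mode,
				u8 vector, u32 dest_apicid, int devid);
		void (*activate)(void *entry, u16 devid, u16 index);
		void (*deactivate)(void *entry, u16 devid, u16 index);
		void (*set_affinity)(void *entry, u16 devid, u16 index,
				     u8 vector, u32 dest_apicid);
		void (*set_allocated)(struct irq_remap_table *table, int index);
		bool (*is_allocated)(struct irq_remap_table *table, int index);
		void (*clear_allocated)(struct irq_remap_table *table, int index);
	};

The per-IOMMU iommu->irte_ops pointer is presumably set to &irte_32_ops (legacy 4-byte IRTEs) or &irte_128_ops (128-bit GA-mode IRTEs) during IOMMU initialization, which happens outside this file excerpt.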
@@ -3903,6 +4151,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		if (!data)
 			goto out_free_data;
 
+		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
+		else
+			data->entry = kzalloc(sizeof(struct irte_ga),
+					      GFP_KERNEL);
+		if (!data->entry) {
+			kfree(data);
+			goto out_free_data;
+		}
+
 		irq_data->hwirq = (devid << 16) + i;
 		irq_data->chip_data = data;
 		irq_data->chip = &amd_ir_chip;
@@ -3939,6 +4197,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
 			data = irq_data->chip_data;
 			irte_info = &data->irq_2_irte;
 			free_irte(irte_info->devid, irte_info->index);
+			kfree(data->entry);
 			kfree(data);
 		}
 	}
@@ -3950,8 +4209,11 @@ static void irq_remapping_activate(struct irq_domain *domain,
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 
-	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+	if (iommu)
+		iommu->irte_ops->activate(data->entry, irte_info->devid,
+					  irte_info->index);
 }
 
 static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -3959,10 +4221,11 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
-	union irte entry;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 
-	entry.val = 0;
-	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+	if (iommu)
+		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+					    irte_info->index);
 }
 
 static struct irq_domain_ops amd_ir_domain_ops = {
@@ -3972,6 +4235,70 @@ static struct irq_domain_ops amd_ir_domain_ops = {
 	.deactivate = irq_remapping_deactivate,
 };
 
+static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+{
+	struct amd_iommu *iommu;
+	struct amd_iommu_pi_data *pi_data = vcpu_info;
+	struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
+	struct amd_ir_data *ir_data = data->chip_data;
+	struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
+	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+	struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
+
+	/* Note:
+	 * This device has never been set up for guest mode.
+	 * we should not modify the IRTE
+	 */
+	if (!dev_data || !dev_data->use_vapic)
+		return 0;
+
+	pi_data->ir_data = ir_data;
+
+	/* Note:
+	 * SVM tries to set up for VAPIC mode, but we are in
+	 * legacy mode. So, we force legacy mode instead.
+	 */
+	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
+		pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
+			 __func__);
+		pi_data->is_guest_mode = false;
+	}
+
+	iommu = amd_iommu_rlookup_table[irte_info->devid];
+	if (iommu == NULL)
+		return -EINVAL;
+
+	pi_data->prev_ga_tag = ir_data->cached_ga_tag;
+	if (pi_data->is_guest_mode) {
+		/* Setting */
+		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
+		irte->hi.fields.vector = vcpu_pi_info->vector;
+		irte->lo.fields_vapic.guest_mode = 1;
+		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
+
+		ir_data->cached_ga_tag = pi_data->ga_tag;
+	} else {
+		/* Un-Setting */
+		struct irq_cfg *cfg = irqd_cfg(data);
+
+		irte->hi.val = 0;
+		irte->lo.val = 0;
+		irte->hi.fields.vector = cfg->vector;
+		irte->lo.fields_remap.guest_mode = 0;
+		irte->lo.fields_remap.destination = cfg->dest_apicid;
+		irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
+		irte->lo.fields_remap.dm = apic->irq_dest_mode;
+
+		/*
+		 * This communicates the ga_tag back to the caller
+		 * so that it can do all the necessary clean up.
+		 */
+		ir_data->cached_ga_tag = 0;
+	}
+
+	return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
+}
+
 static int amd_ir_set_affinity(struct irq_data *data,
 			       const struct cpumask *mask, bool force)
 {
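amd_ir_set_vcpu_affinity() above is not called directly; it is reached through the generic irq_set_vcpu_affinity() helper on an interrupt remapped by this driver. The structures it consumes (struct amd_iommu_pi_data and struct vcpu_data) are defined outside this hunk. A rough hypervisor-side sketch, using only the fields referenced in the code above; the function name, variable names and the way ga_tag is produced are illustrative assumptions:

	#include <linux/interrupt.h>
	#include <linux/amd-iommu.h>
	#include <asm/irq_remapping.h>

	/* Hypothetical: route 'host_irq' of an assigned device into guest mode. */
	static int example_pi_set(unsigned int host_irq, u64 vapic_backing_page_pa,
				  u32 guest_vector, u32 ga_tag)
	{
		struct vcpu_data vcpu_info = {
			.vector = guest_vector,		/* -> irte->hi.fields.vector */
		};
		struct amd_iommu_pi_data pi_data = {
			.base          = vapic_backing_page_pa, /* -> ga_root_ptr (>> 12) */
			.ga_tag        = ga_tag,		 /* reported back via the GA log */
			.is_guest_mode = true,
			.vcpu_data     = &vcpu_info,
		};

		/* For AMD-remapped IRQs this lands in amd_ir_set_vcpu_affinity(). */
		return irq_set_vcpu_affinity(host_irq, &pi_data);
	}

Passing is_guest_mode = false instead takes the "Un-Setting" branch above and restores plain remapped delivery from the current irq_cfg.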
@@ -3979,8 +4306,12 @@ static int amd_ir_set_affinity(struct irq_data *data,
 	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
 	struct irq_cfg *cfg = irqd_cfg(data);
 	struct irq_data *parent = data->parent_data;
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 	int ret;
 
+	if (!iommu)
+		return -ENODEV;
+
 	ret = parent->chip->irq_set_affinity(parent, mask, force);
 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
 		return ret;
@@ -3989,9 +4320,8 @@ static int amd_ir_set_affinity(struct irq_data *data,
 	 * Atomically updates the IRTE with the new destination, vector
 	 * and flushes the interrupt entry cache.
 	 */
-	ir_data->irte_entry.fields.vector = cfg->vector;
-	ir_data->irte_entry.fields.destination = cfg->dest_apicid;
-	modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
+	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+				      irte_info->index, cfg->vector, cfg->dest_apicid);
 
 	/*
 	 * After this point, all the interrupts will start arriving
@@ -4013,6 +4343,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
 static struct irq_chip amd_ir_chip = {
 	.irq_ack = ir_ack_apic_edge,
 	.irq_set_affinity = amd_ir_set_affinity,
+	.irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
 	.irq_compose_msi_msg = ir_compose_msi_msg,
 };
 
@@ -4027,4 +4358,43 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 
 	return 0;
 }
+
+int amd_iommu_update_ga(int cpu, bool is_run, void *data)
+{
+	unsigned long flags;
+	struct amd_iommu *iommu;
+	struct irq_remap_table *irt;
+	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+	int devid = ir_data->irq_2_irte.devid;
+	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
+
+	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+		return 0;
+
+	iommu = amd_iommu_rlookup_table[devid];
+	if (!iommu)
+		return -ENODEV;
+
+	irt = get_irq_table(devid, false);
+	if (!irt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&irt->lock, flags);
+
+	if (ref->lo.fields_vapic.guest_mode) {
+		if (cpu >= 0)
+			ref->lo.fields_vapic.destination = cpu;
+		ref->lo.fields_vapic.is_run = is_run;
+		barrier();
+	}
+
+	spin_unlock_irqrestore(&irt->lock, flags);
+
+	iommu_flush_irt(iommu, devid);
+	iommu_completion_wait(iommu);
+	return 0;
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
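amd_iommu_update_ga() is exported for the component that owns the guest-mode IRTEs; the void *data argument is the struct amd_ir_data cookie handed back through pi_data->ir_data by amd_ir_set_vcpu_affinity(). A sketch of the expected calling pattern on vCPU scheduling events, assuming the prototype is exposed via include/linux/amd-iommu.h like the other exports in this series (the wrapper names are illustrative):

	#include <linux/amd-iommu.h>

	/* vCPU is being loaded onto physical CPU 'cpu'. */
	static void example_vcpu_load(void *ir_data, int cpu)
	{
		/* cpu >= 0 also updates the IRTE destination; is_run = true
		 * re-enables direct (doorbell) delivery to that CPU. */
		amd_iommu_update_ga(cpu, true, ir_data);
	}

	/* vCPU is being scheduled out or starts blocking. */
	static void example_vcpu_put(void *ir_data)
	{
		/* A negative cpu keeps the previous destination; clearing is_run
		 * makes interrupts for this vCPU land in the GA log instead, which
		 * wakes it up through the notifier registered earlier in this patch. */
		amd_iommu_update_ga(-1, false, ir_data);
	}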