@@ -235,6 +235,15 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
 	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
 		return 0;
 
+	/*
+	 * Careful here. @apicd might either have move_in_progress set or
+	 * be enqueued for cleanup. Assigning a new vector would either
+	 * leave a stale vector on some CPU around or in case of a pending
+	 * cleanup corrupt the hlist.
+	 */
+	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
+		return -EBUSY;
+
 	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
 	if (vector > 0)
 		apic_update_vector(irqd, vector, cpu);
@@ -579,8 +588,7 @@ error:
 static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
 				  struct irq_data *irqd, int ind)
 {
-	unsigned int cpu, vector, prev_cpu, prev_vector;
-	struct apic_chip_data *apicd;
+	struct apic_chip_data apicd;
 	unsigned long flags;
 	int irq;
 
@@ -596,24 +604,26 @@ static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
 		return;
 	}
 
-	apicd = irqd->chip_data;
-	if (!apicd) {
+	if (!irqd->chip_data) {
 		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
 		return;
 	}
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	cpu = apicd->cpu;
-	vector = apicd->vector;
-	prev_cpu = apicd->prev_cpu;
-	prev_vector = apicd->prev_vector;
+	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
-	seq_printf(m, "%*sVector: %5u\n", ind, "", vector);
-	seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu);
-	if (prev_vector) {
-		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector);
-		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu);
+
+	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
+	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
+	if (apicd.prev_vector) {
+		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
+		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
 	}
+	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
+	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
+	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
+	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
+	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
 }
 #endif
 
@@ -800,13 +810,18 @@ static int apic_retrigger_irq(struct irq_data *irqd)
 	return 1;
 }
 
-void apic_ack_edge(struct irq_data *irqd)
+void apic_ack_irq(struct irq_data *irqd)
 {
-	irq_complete_move(irqd_cfg(irqd));
 	irq_move_irq(irqd);
 	ack_APIC_irq();
 }
 
+void apic_ack_edge(struct irq_data *irqd)
+{
+	irq_complete_move(irqd_cfg(irqd));
+	apic_ack_irq(irqd);
+}
+
 static struct irq_chip lapic_controller = {
 	.name			= "APIC",
 	.irq_ack		= apic_ack_edge,