@@ -30,6 +30,7 @@ struct apic_chip_data {
 
 struct irq_domain *x86_vector_domain;
 static DEFINE_RAW_SPINLOCK(vector_lock);
+static cpumask_var_t vector_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -116,14 +117,10 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
 	static int current_offset = VECTOR_OFFSET_START % 16;
 	int cpu, err;
-	cpumask_var_t tmp_mask;
 
 	if (d->move_in_progress)
 		return -EBUSY;
 
-	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-		return -ENOMEM;
-
 	/* Only try and allocate irqs on cpus that are present */
 	err = -ENOSPC;
 	cpumask_clear(d->old_domain);
@@ -131,21 +128,22 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	while (cpu < nr_cpu_ids) {
 		int new_cpu, vector, offset;
 
-		apic->vector_allocation_domain(cpu, tmp_mask, mask);
+		apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
-		if (cpumask_subset(tmp_mask, d->domain)) {
+		if (cpumask_subset(vector_cpumask, d->domain)) {
 			err = 0;
-			if (cpumask_equal(tmp_mask, d->domain))
+			if (cpumask_equal(vector_cpumask, d->domain))
 				break;
 			/*
 			 * New cpumask using the vector is a proper subset of
 			 * the current in use mask. So cleanup the vector
 			 * allocation for the members that are not used anymore.
 			 */
-			cpumask_andnot(d->old_domain, d->domain, tmp_mask);
+			cpumask_andnot(d->old_domain, d->domain,
+				       vector_cpumask);
 			d->move_in_progress =
 			   cpumask_intersects(d->old_domain, cpu_online_mask);
-			cpumask_and(d->domain, d->domain, tmp_mask);
+			cpumask_and(d->domain, d->domain, vector_cpumask);
 			break;
 		}
 
@@ -159,16 +157,18 @@ next:
 		}
 
 		if (unlikely(current_vector == vector)) {
-			cpumask_or(d->old_domain, d->old_domain, tmp_mask);
-			cpumask_andnot(tmp_mask, mask, d->old_domain);
-			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+			cpumask_or(d->old_domain, d->old_domain,
+				   vector_cpumask);
+			cpumask_andnot(vector_cpumask, mask, d->old_domain);
+			cpu = cpumask_first_and(vector_cpumask,
+						cpu_online_mask);
 			continue;
 		}
 
 		if (test_bit(vector, used_vectors))
 			goto next;
 
-		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
 			if (per_cpu(vector_irq, new_cpu)[vector] >
 			    VECTOR_UNDEFINED)
 				goto next;
@@ -181,14 +181,13 @@ next:
 			d->move_in_progress =
 			   cpumask_intersects(d->old_domain, cpu_online_mask);
 		}
-		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		d->cfg.vector = vector;
-		cpumask_copy(d->domain, tmp_mask);
+		cpumask_copy(d->domain, vector_cpumask);
 		err = 0;
 		break;
 	}
-	free_cpumask_var(tmp_mask);
 
 	if (!err) {
 		/* cache destination APIC IDs into cfg->dest_apicid */
@@ -397,6 +396,8 @@ int __init arch_early_irq_init(void)
 	arch_init_msi_domain(x86_vector_domain);
 	arch_init_htirq_domain(x86_vector_domain);
 
+	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+
 	return arch_early_ioapic_init();
 }
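Note on the change above: __assign_irq_vector() always runs with vector_lock held, so a single statically allocated scratch mask can replace the per-call alloc_cpumask_var(..., GFP_ATOMIC), removing an atomic allocation and its -ENOMEM failure path from the hot path. The mask is instead allocated once in arch_early_irq_init(), where an allocation failure is fatal anyway (BUG_ON). The standalone C sketch below illustrates the same pattern outside the kernel; subsystem_init(), assign_resource() and scratch_mask are hypothetical names for illustration only, not part of the patch.

/*
 * Userspace sketch of the pattern applied in the patch above:
 * a scratch buffer that used to be allocated on every call is
 * allocated once at init time and reused under the lock that
 * already serializes all callers. All names are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBITS 256

static pthread_mutex_t assign_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t *scratch_mask;	/* analogue of vector_cpumask */

/*
 * One-time setup, analogous to arch_early_irq_init(): failing here
 * is fatal, so callers never see an allocation error at runtime.
 */
static void subsystem_init(void)
{
	scratch_mask = calloc(NBITS / 64, sizeof(*scratch_mask));
	if (!scratch_mask)
		abort();	/* analogue of the BUG_ON() */
}

/*
 * Hot-path function, analogous to __assign_irq_vector(): it is only
 * called with assign_lock held, so the shared scratch buffer is safe
 * and no per-call allocation (or -ENOMEM path) is needed.
 */
static int assign_resource(const uint64_t *requested)
{
	for (int i = 0; i < NBITS / 64; i++)
		scratch_mask[i] = requested[i];	/* build candidate set */
	/* ... search scratch_mask for a free slot ... */
	return 0;
}

int main(void)
{
	uint64_t req[NBITS / 64] = { 0x5 };
	int err;

	subsystem_init();
	pthread_mutex_lock(&assign_lock);
	err = assign_resource(req);
	pthread_mutex_unlock(&assign_lock);
	printf("assign_resource() -> %d\n", err);
	return err;
}

The design choice is the same in both cases: when a lock already serializes every caller, a shared preallocated scratch buffer is safe, and moving the allocation to init time turns a runtime failure mode into a boot-time assertion.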