|
@@ -25,8 +25,10 @@
|
|
|
#include <linux/irq.h>
|
|
|
#include <linux/kernel.h>
|
|
|
#include <linux/export.h>
|
|
|
+#include <linux/of.h>
|
|
|
#include <linux/perf_event.h>
|
|
|
#include <linux/platform_device.h>
|
|
|
+#include <linux/slab.h>
|
|
|
#include <linux/spinlock.h>
|
|
|
#include <linux/uaccess.h>
|
|
|
|
|
@@ -405,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
|
|
|
free_percpu_irq(irq, &cpu_hw_events);
|
|
|
} else {
|
|
|
for (i = 0; i < irqs; ++i) {
|
|
|
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
|
|
|
+ int cpu = i;
|
|
|
+
|
|
|
+ if (armpmu->irq_affinity)
|
|
|
+ cpu = armpmu->irq_affinity[i];
|
|
|
+
|
|
|
+ if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
|
|
|
continue;
|
|
|
irq = platform_get_irq(pmu_device, i);
|
|
|
if (irq > 0)
|
|
@@ -459,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
|
|
|
on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
|
|
|
} else {
|
|
|
for (i = 0; i < irqs; ++i) {
|
|
|
+ int cpu = i;
|
|
|
+
|
|
|
err = 0;
|
|
|
irq = platform_get_irq(pmu_device, i);
|
|
|
if (irq <= 0)
|
|
|
continue;
|
|
|
|
|
|
+ if (armpmu->irq_affinity)
|
|
|
+ cpu = armpmu->irq_affinity[i];
|
|
|
+
|
|
|
/*
|
|
|
* If we have a single PMU interrupt that we can't shift,
|
|
|
* assume that we're running on a uniprocessor machine and
|
|
|
* continue. Otherwise, continue without this interrupt.
|
|
|
*/
|
|
|
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
|
|
|
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
|
|
|
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
|
|
|
- irq, i);
|
|
|
+ irq, cpu);
|
|
|
continue;
|
|
|
}
|
|
|
|
|
@@ -485,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
- cpumask_set_cpu(i, &armpmu->active_irqs);
|
|
|
+ cpumask_set_cpu(cpu, &armpmu->active_irqs);
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -1298,9 +1310,48 @@ static const struct of_device_id armpmu_of_device_ids[] = {
 
 static int armpmu_device_probe(struct platform_device *pdev)
 {
+	int i, *irqs;
+
 	if (!cpu_pmu)
 		return -ENODEV;
 
+	/* Build a per-IRQ logical-CPU affinity table from DT. */
+	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < pdev->num_resources; ++i) {
+		struct device_node *dn;
+		int cpu;
+
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+				      i);
+		if (!dn) {
+			/* dn is NULL here: report the parent node instead. */
+			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+				of_node_full_name(pdev->dev.of_node), i);
+			break;
+		}
+
+		for_each_possible_cpu(cpu)
+			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+				break;
+
+		/* Use dn before dropping the reference taken by of_parse_phandle(). */
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			of_node_put(dn);
+			break;
+		}
+		of_node_put(dn);
+
+		irqs[i] = cpu;
+	}
+
+	/* Only install the table if every interrupt resolved to a CPU. */
+	if (i == pdev->num_resources)
+		cpu_pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
 	cpu_pmu->plat_device = pdev;
 	return 0;
 }
|