@@ -602,7 +602,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
 	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
+	if (irq > 0 && irq_is_percpu(irq)) {
 		on_each_cpu_mask(&cpu_pmu->supported_cpus,
 				 cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
@@ -616,7 +616,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
+			if (irq > 0)
 				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
@@ -638,7 +638,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	}
 
 	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
+	if (irq > 0 && irq_is_percpu(irq)) {
 		err = request_percpu_irq(irq, handler, "arm-pmu",
 					 &hw_events->percpu_pmu);
 		if (err) {
@@ -919,7 +919,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
 		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
 		irq = platform_get_irq(pdev, i);
-		if (irq >= 0) {
+		if (irq > 0) {
 			bool spi = !irq_is_percpu(irq);
 
 			if (i > 0 && spi != using_spi) {
@@ -969,8 +969,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 	if (cpumask_weight(&pmu->supported_cpus) == 0) {
 		int irq = platform_get_irq(pdev, 0);
 
-		if (irq_is_percpu(irq)) {
-			/* If using PPIs, check the affinity of the partition */
+		if (irq > 0 && irq_is_percpu(irq)) {
 			int ret;
 
 			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
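
Not part of the patch itself, but a minimal sketch of the pattern the hunks above converge on, with a hypothetical function name (example_request_pmu_irq) and illustrative flags: platform_get_irq() reports failure with a negative errno, and IRQ number 0 is not a usable Linux IRQ, so only a strictly positive return value should ever reach request_irq() or request_percpu_irq().

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Illustrative only; name and flags are not taken from the patch. */
static int example_request_pmu_irq(struct platform_device *pdev,
				   irq_handler_t handler, void *dev_id)
{
	int irq = platform_get_irq(pdev, 0);

	/*
	 * Treat anything that is not strictly positive as "no usable
	 * interrupt": negative values are errnos, and 0 is not a valid
	 * Linux IRQ number.
	 */
	if (irq <= 0)
		return irq < 0 ? irq : -ENXIO;

	return request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu", dev_id);
}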