arm_pmu_acpi.c

/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>
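
/*
 * Per-CPU bookkeeping: the arm_pmu (if any) that has been probed for each
 * CPU, and the Linux IRQ parsed from that CPU's MADT GICC entry.
 */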
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
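
/*
 * Map a CPU's MADT GICC performance interrupt to a Linux IRQ. Returns the
 * IRQ number, 0 if the GICC describes no usable interrupt, or a negative
 * errno on failure.
 */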
static int arm_pmu_acpi_register_irq(int cpu)
{
        struct acpi_madt_generic_interrupt *gicc;
        int gsi, trigger;

        gicc = acpi_cpu_get_madt_gicc(cpu);
        if (WARN_ON(!gicc))
                return -EINVAL;

        gsi = gicc->performance_interrupt;

        /*
         * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
         * have an interrupt. QEMU advertises this by using a GSI of zero,
         * which is not known to be valid on any hardware despite being
         * valid per the spec. Take the pragmatic approach and reject a
         * GSI of zero for now.
         */
        if (!gsi)
                return 0;

        if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
                trigger = ACPI_EDGE_SENSITIVE;
        else
                trigger = ACPI_LEVEL_SENSITIVE;

        /*
         * Helpfully, the MADT GICC doesn't have a polarity flag for the
         * "performance interrupt". Luckily, on compliant GICs the polarity
         * is a fixed value in HW (for both SPIs and PPIs) that we cannot
         * change from SW.
         *
         * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy.
         * This may not match the real polarity, but that should not matter.
         *
         * Other interrupt controllers are not supported with ACPI.
         */
        return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
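
/* Undo arm_pmu_acpi_register_irq() for the given CPU. */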
static void arm_pmu_acpi_unregister_irq(int cpu)
{
        struct acpi_madt_generic_interrupt *gicc;
        int gsi;

        gicc = acpi_cpu_get_madt_gicc(cpu);
        if (!gicc)
                return;

        gsi = gicc->performance_interrupt;
        acpi_unregister_gsi(gsi);
}
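
/*
 * Register the performance interrupt for every possible CPU up front. On
 * failure, unwind any GSIs registered so far, taking care to unregister a
 * GSI shared between CPUs (e.g. a PPI) only once.
 */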
static int arm_pmu_acpi_parse_irqs(void)
{
        int irq, cpu, irq_cpu, err;

        for_each_possible_cpu(cpu) {
                irq = arm_pmu_acpi_register_irq(cpu);
                if (irq < 0) {
                        err = irq;
                        pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
                                cpu, err);
                        goto out_err;
                } else if (irq == 0) {
                        pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
                }

                per_cpu(pmu_irqs, cpu) = irq;
        }

        return 0;

out_err:
        for_each_possible_cpu(cpu) {
                irq = per_cpu(pmu_irqs, cpu);
                if (!irq)
                        continue;

                arm_pmu_acpi_unregister_irq(cpu);

                /*
                 * Blat all copies of the IRQ so that we only unregister the
                 * corresponding GSI once (e.g. when we have PPIs).
                 */
                for_each_possible_cpu(irq_cpu) {
                        if (per_cpu(pmu_irqs, irq_cpu) == irq)
                                per_cpu(pmu_irqs, irq_cpu) = 0;
                }
        }

        return err;
}
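
/*
 * CPUs with the same MIDR are assumed to share a logical PMU. Return the
 * arm_pmu already probed for such a CPU, or allocate a new one. Must be
 * called on the CPU being probed, as this reads the local MIDR.
 */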
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
        unsigned long cpuid = read_cpuid_id();
        struct arm_pmu *pmu;
        int cpu;

        for_each_possible_cpu(cpu) {
                pmu = per_cpu(probed_pmus, cpu);
                if (!pmu || pmu->acpi_cpuid != cpuid)
                        continue;

                return pmu;
        }

        pmu = armpmu_alloc();
        if (!pmu) {
                pr_warn("Unable to allocate PMU for CPU%d\n",
                        smp_processor_id());
                return NULL;
        }

        pmu->acpi_cpuid = cpuid;

        return pmu;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage
 * the affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another
 * CPU coming up. The perf core won't open events while a hotplug event is
 * in progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
        struct arm_pmu *pmu;
        struct pmu_hw_events __percpu *hw_events;
        int irq;

        /* If we've already probed this CPU, we have nothing to do */
        if (per_cpu(probed_pmus, cpu))
                return 0;

        irq = per_cpu(pmu_irqs, cpu);

        pmu = arm_pmu_acpi_find_alloc_pmu();
        if (!pmu)
                return -ENOMEM;

        cpumask_set_cpu(cpu, &pmu->supported_cpus);

        per_cpu(probed_pmus, cpu) = pmu;

        /*
         * Log and request the IRQ so the core arm_pmu code can manage it. In
         * some situations (e.g. mismatched PPIs), we may fail to request the
         * IRQ. However, it may be too late for us to do anything about it.
         * The common ARM PMU code will log a warning in this case.
         */
        hw_events = pmu->hw_events;
        per_cpu(hw_events->irq, cpu) = irq;
        armpmu_request_irq(pmu, cpu);

        /*
         * Ideally, we'd probe the PMU here when we find the first matching
         * CPU. We can't do that for several reasons; see the comment in
         * arm_pmu_acpi_init().
         *
         * So for the time being, we're done.
         */
        return 0;
}
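
/*
 * Called from the PMU driver. For each PMU that has been probed for some
 * CPU but not yet initialised, invoke the driver's init_fn, give the PMU a
 * unique <name>_<index> name, and register it with perf.
 */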
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
        int pmu_idx = 0;
        int cpu, ret;

        if (acpi_disabled)
                return 0;

        /*
         * Initialise and register the set of PMUs which we know about right
         * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that
         * we could handle late hotplug, but this may lead to deadlock since
         * we might try to register a hotplug notifier instance from within
         * a hotplug notifier.
         *
         * There's also the problem of having access to the right init_fn,
         * without tying this too deeply into the "real" PMU driver.
         *
         * For the moment, as with the platform/DT case, we need at least
         * one of a PMU's CPUs to be online at probe time.
         */
        for_each_possible_cpu(cpu) {
                struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
                char *base_name;

                if (!pmu || pmu->name)
                        continue;

                ret = init_fn(pmu);
                if (ret == -ENODEV) {
                        /* PMU not handled by this driver, or not present */
                        continue;
                } else if (ret) {
                        pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
                        return ret;
                }

                base_name = pmu->name;
                pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
                if (!pmu->name) {
                        pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
                        return -ENOMEM;
                }

                ret = armpmu_register(pmu);
                if (ret) {
                        pr_warn("Failed to register PMU for CPU%d\n", cpu);
                        kfree(pmu->name);
                        return ret;
                }
        }

        return 0;
}
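
/*
 * Parse the MADT PMU interrupts up front, then register the hotplug
 * callback through which each CPU is associated with a PMU as it comes
 * online.
 */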
static int arm_pmu_acpi_init(void)
{
        int ret;

        if (acpi_disabled)
                return 0;

        /*
         * We can't request IRQs yet, since we don't know the cookie value
         * until we know which CPUs share the same logical PMU. We'll handle
         * that in arm_pmu_acpi_cpu_starting().
         */
        ret = arm_pmu_acpi_parse_irqs();
        if (ret)
                return ret;

        ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
                                "perf/arm/pmu_acpi:starting",
                                arm_pmu_acpi_cpu_starting, NULL);

        return ret;
}

subsys_initcall(arm_pmu_acpi_init)