arm_pmu_acpi.c

/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>
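
/*
 * Per-CPU book-keeping: the arm_pmu instance a CPU has been associated with
 * once probed, and the Linux IRQ parsed from that CPU's MADT GICC entry.
 */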
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
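
/*
 * Map the GICC performance interrupt GSI for @cpu to a Linux IRQ.
 * Returns the IRQ number on success, 0 if no usable GSI is described,
 * or a negative error code otherwise.
 */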
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
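
/* Undo arm_pmu_acpi_register_irq() by releasing the GSI mapping for @cpu. */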
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}
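
/*
 * Walk every possible CPU, map its PMU GSI to a Linux IRQ, and hand the IRQ
 * to the core arm_pmu code. On failure, unwind all registrations made so far.
 */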
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
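
/*
 * Return the arm_pmu already associated with a CPU whose MIDR matches the
 * current CPU, or allocate a fresh one (in atomic context) if none exists yet.
 */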
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}
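
/*
 * Probe, name and register each distinct PMU discovered so far, using the
 * caller-supplied @init_fn to initialise each instance.
 */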
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}
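
/*
 * Parse the per-CPU PMU interrupts from the MADT, then install the hotplug
 * "starting" callback that associates each CPU with a PMU as it comes online.
 */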
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)