arm_pmu_acpi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

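/*
 * Per-CPU bookkeeping: the arm_pmu instance probed for each CPU, and the
 * Linux IRQ number parsed from that CPU's MADT GICC entry (0 if none).
 */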
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

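/*
 * Map the GICC performance interrupt for @cpu to a Linux IRQ. Returns the
 * IRQ number on success, 0 if the MADT describes no interrupt (a GSI of
 * zero), or a negative errno from acpi_register_gsi() on failure.
 */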
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;
	if (gsi)
		acpi_unregister_gsi(gsi);
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags = IORESOURCE_IRQ,
	}
};

static struct platform_device spe_dev = {
	.name = ARMV8_SPE_PDEV_NAME,
	.id = -1,
	.resource = spe_resources,
	.num_resources = ARRAY_SIZE(spe_resources)
};

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
	int cpu, hetid, irq, ret;
	bool first = true;
	u16 gsi = 0;

	/*
	 * Sanity check all the GICC tables for the same interrupt number.
	 * For now, we only support homogeneous ACPI/SPE machines.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < ACPI_MADT_GICC_SPE)
			return;

		if (first) {
			gsi = gicc->spe_interrupt;
			if (!gsi)
				return;
			hetid = find_acpi_cpu_topology_hetero_id(cpu);
			first = false;
		} else if ((gsi != gicc->spe_interrupt) ||
			   (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
			pr_warn("ACPI: SPE must be homogeneous\n");
			return;
		}
	}

	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
		return;
	}

	spe_resources[0].start = irq;
	ret = platform_device_register(&spe_dev);
	if (ret < 0) {
		pr_warn("ACPI: SPE: Unable to register device\n");
		acpi_unregister_gsi(gsi);
	}
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */

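/*
 * Register and request the PMU IRQ for every possible CPU, stashing it in
 * pmu_irqs. On failure, unwind anything already set up, taking care to
 * unregister each GSI only once (e.g. when CPUs share a PPI).
 */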
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		err = armpmu_request_irq(irq, cpu);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

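/*
 * Find the PMU already probed for a CPU with the same MIDR as the current
 * CPU, or allocate a fresh one (atomically, since this runs from the CPU
 * starting hotplug callback).
 */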
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}

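/*
 * For each PMU probed so far that has not yet been initialised, call the
 * driver-supplied @init_fn, give the PMU a unique "<name>_<idx>" identifier,
 * and register it with the perf core.
 */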
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}

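/*
 * Runs at subsys_initcall time: do nothing if ACPI is disabled, register the
 * SPE device described by the MADT (if any), parse and request the PMU
 * interrupts, then install the hotplug callback that associates CPUs with
 * their PMUs as they come online.
 */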
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)