perf_event_v6.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *   - change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *     effectively stops the counter from counting.
 *   - disable the counter's interrupt generation (each counter has its
 *     own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *   - enable the counter's interrupt generation.
 *   - set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum armv6_perf_types {
        ARMV6_PERFCTR_ICACHE_MISS       = 0x0,
        ARMV6_PERFCTR_IBUF_STALL        = 0x1,
        ARMV6_PERFCTR_DDEP_STALL        = 0x2,
        ARMV6_PERFCTR_ITLB_MISS         = 0x3,
        ARMV6_PERFCTR_DTLB_MISS         = 0x4,
        ARMV6_PERFCTR_BR_EXEC           = 0x5,
        ARMV6_PERFCTR_BR_MISPREDICT     = 0x6,
        ARMV6_PERFCTR_INSTR_EXEC        = 0x7,
        ARMV6_PERFCTR_DCACHE_HIT        = 0x9,
        ARMV6_PERFCTR_DCACHE_ACCESS     = 0xA,
        ARMV6_PERFCTR_DCACHE_MISS       = 0xB,
        ARMV6_PERFCTR_DCACHE_WBACK      = 0xC,
        ARMV6_PERFCTR_SW_PC_CHANGE      = 0xD,
        ARMV6_PERFCTR_MAIN_TLB_MISS     = 0xF,
        ARMV6_PERFCTR_EXPL_D_ACCESS     = 0x10,
        ARMV6_PERFCTR_LSU_FULL_STALL    = 0x11,
        ARMV6_PERFCTR_WBUF_DRAINED      = 0x12,
        ARMV6_PERFCTR_CPU_CYCLES        = 0xFF,
        ARMV6_PERFCTR_NOP               = 0x20,
};

enum armv6_counters {
        ARMV6_CYCLE_COUNTER = 0,
        ARMV6_COUNTER0,
        ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6_PERFCTR_INSTR_EXEC,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6_PERFCTR_BR_EXEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6_PERFCTR_BR_MISPREDICT,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV6_PERFCTR_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6_PERFCTR_DCACHE_MISS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV6_PERFCTR_DCACHE_MISS,

        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6_PERFCTR_ICACHE_MISS,

        /*
         * The ARM performance counters can count micro DTLB misses, micro ITLB
         * misses and main TLB misses. There isn't an event for TLB misses, so
         * use the micro misses here and if users want the main TLB misses they
         * can use a raw counter; an example follows this map.
         */
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6_PERFCTR_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6_PERFCTR_DTLB_MISS,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6_PERFCTR_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6_PERFCTR_ITLB_MISS,
};
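
/*
 * Purely as an illustration of the raw-counter route mentioned above: on
 * this PMU the main TLB miss event is 0x0F, so it could be requested from
 * userspace with a raw event, e.g. something like "perf stat -e r0f".
 * (Example command only; not part of this driver.)
 */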

enum armv6mpcore_perf_types {
        ARMV6MPCORE_PERFCTR_ICACHE_MISS     = 0x0,
        ARMV6MPCORE_PERFCTR_IBUF_STALL      = 0x1,
        ARMV6MPCORE_PERFCTR_DDEP_STALL      = 0x2,
        ARMV6MPCORE_PERFCTR_ITLB_MISS       = 0x3,
        ARMV6MPCORE_PERFCTR_DTLB_MISS       = 0x4,
        ARMV6MPCORE_PERFCTR_BR_EXEC         = 0x5,
        ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
        ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
        ARMV6MPCORE_PERFCTR_INSTR_EXEC      = 0x8,
        ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
        ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
        ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
        ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
        ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
        ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
        ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
        ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
        ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
        ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
        ARMV6MPCORE_PERFCTR_CPU_CYCLES      = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6MPCORE_PERFCTR_BR_EXEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,

        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,

        /*
         * The ARM performance counters can count micro DTLB misses, micro ITLB
         * misses and main TLB misses. There isn't an event for TLB misses, so
         * use the micro misses here and if users want the main TLB misses they
         * can use a raw counter.
         */
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6MPCORE_PERFCTR_DTLB_MISS,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6MPCORE_PERFCTR_ITLB_MISS,
};

static inline unsigned long
armv6_pmcr_read(void)
{
        u32 val;

        asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
        return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
        asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE               (1 << 0)
#define ARMV6_PMCR_CTR01_RESET          (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET         (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV           (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN           (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN           (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN           (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW      (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW      (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW      (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT     20
#define ARMV6_PMCR_EVT_COUNT0_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT     12
#define ARMV6_PMCR_EVT_COUNT1_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
        (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
         ARMV6_PMCR_CCOUNT_OVERFLOW)
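
/*
 * For reference, the PMCR bit layout implied by the definitions above
 * (descriptive summary only, not used by the code):
 *
 *   [31:28] reserved
 *   [27:20] event selection for counter 0
 *   [19:12] event selection for counter 1
 *   [10]    cycle counter overflow flag
 *   [9]     counter 1 overflow flag
 *   [8]     counter 0 overflow flag
 *   [6]     cycle counter interrupt enable
 *   [5]     counter 1 interrupt enable
 *   [4]     counter 0 interrupt enable
 *   [3]     cycle counter divider
 *   [2]     cycle counter reset
 *   [1]     counter 0/1 reset
 *   [0]     enable
 */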

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
        return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
                                  enum armv6_counters counter)
{
        int ret = 0;

        if (ARMV6_CYCLE_COUNTER == counter)
                ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
        else if (ARMV6_COUNTER0 == counter)
                ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
        else if (ARMV6_COUNTER1 == counter)
                ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);

        return ret;
}
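
/*
 * The accessors below read and write the counters through CP15 c15/c12.
 * As encoded in the mrc/mcr instructions: opcode2 1 is the cycle counter
 * and opcode2 2/3 are event counters 0/1 (opcode2 0, used above, is the
 * control register). See the ARM11 TRMs for the authoritative register
 * descriptions.
 */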

static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;
        unsigned long value = 0;

        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
        else if (ARMV6_COUNTER0 == counter)
                asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
        else if (ARMV6_COUNTER1 == counter)
                asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);

        return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
        else if (ARMV6_COUNTER0 == counter)
                asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
        else if (ARMV6_COUNTER1 == counter)
                asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void armv6pmu_enable_event(struct perf_event *event)
{
        unsigned long val, mask, evt, flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask = 0;
                evt  = ARMV6_PMCR_CCOUNT_IEN;
        } else if (ARMV6_COUNTER0 == idx) {
                mask = ARMV6_PMCR_EVT_COUNT0_MASK;
                evt  = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
                        ARMV6_PMCR_COUNT0_IEN;
        } else if (ARMV6_COUNTER1 == idx) {
                mask = ARMV6_PMCR_EVT_COUNT1_MASK;
                evt  = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
                        ARMV6_PMCR_COUNT1_IEN;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Mask out the current event and set the counter to count the event
         * that we're interested in.
         */
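        /*
         * For example (illustrative values only): programming counter 0 to
         * count data cache accesses (event 0xA) yields
         * evt == (0xA << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ARMV6_PMCR_COUNT0_IEN
         *     == 0x00A00010.
         */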
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t
armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
        unsigned long pmcr = armv6_pmcr_read();
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        if (!armv6_pmcr_has_overflowed(pmcr))
                return IRQ_NONE;

        regs = get_irq_regs();

        /*
         * The interrupts are cleared by writing the overflow flags back to
         * the control register. All of the other bits don't have any effect
         * if they are rewritten, so write the whole value back.
         */
        armv6_pmcr_write(pmcr);

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val |= ARMV6_PMCR_ENABLE;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~ARMV6_PMCR_ENABLE;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
                       struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Always place a cycle counter into the cycle counter. */
        if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
                if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV6_CYCLE_COUNTER;
        } else {
                /*
                 * For anything other than a cycle counter, try and use
                 * counter0 and counter1.
                 */
                if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
                        return ARMV6_COUNTER1;

                if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
                        return ARMV6_COUNTER0;

                /* The counters are all in use. */
                return -EAGAIN;
        }
}

static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
                                     struct perf_event *event)
{
        clear_bit(event->hw.idx, cpuc->used_mask);
}

static void armv6pmu_disable_event(struct perf_event *event)
{
        unsigned long val, mask, evt, flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask = ARMV6_PMCR_CCOUNT_IEN;
                evt  = 0;
        } else if (ARMV6_COUNTER0 == idx) {
                mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
                evt  = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
        } else if (ARMV6_COUNTER1 == idx) {
                mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
                evt  = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Mask out the current event and set the counter to count the number
         * of ETM bus signal assertion cycles. The external reporting should
         * be disabled and so this should never increment.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
        unsigned long val, mask, flags, evt = 0;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask = ARMV6_PMCR_CCOUNT_IEN;
        } else if (ARMV6_COUNTER0 == idx) {
                mask = ARMV6_PMCR_COUNT0_IEN;
        } else if (ARMV6_COUNTER1 == idx) {
                mask = ARMV6_PMCR_COUNT1_IEN;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Unlike UP ARMv6, we don't have a way of stopping the counters. We
         * simply disable the interrupt reporting.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv6_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv6_perf_map,
                                &armv6_perf_cache_map, 0xFF);
}

static void armv6pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq      = armv6pmu_handle_irq;
        cpu_pmu->enable          = armv6pmu_enable_event;
        cpu_pmu->disable         = armv6pmu_disable_event;
        cpu_pmu->read_counter    = armv6pmu_read_counter;
        cpu_pmu->write_counter   = armv6pmu_write_counter;
        cpu_pmu->get_event_idx   = armv6pmu_get_event_idx;
        cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
        cpu_pmu->start           = armv6pmu_start;
        cpu_pmu->stop            = armv6pmu_stop;
        cpu_pmu->map_event       = armv6_map_event;
        cpu_pmu->num_events      = 3;   /* the cycle counter plus two event counters */
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name = "armv6_1136";
        return 0;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name = "armv6_1156";
        return 0;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name = "armv6_1176";
        return 0;
}

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
static int armv6mpcore_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv6mpcore_perf_map,
                                &armv6mpcore_perf_cache_map, 0xFF);
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->name            = "armv6_11mpcore";
        cpu_pmu->handle_irq      = armv6pmu_handle_irq;
        cpu_pmu->enable          = armv6pmu_enable_event;
        cpu_pmu->disable         = armv6mpcore_pmu_disable_event;
        cpu_pmu->read_counter    = armv6pmu_read_counter;
        cpu_pmu->write_counter   = armv6pmu_write_counter;
        cpu_pmu->get_event_idx   = armv6pmu_get_event_idx;
        cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
        cpu_pmu->start           = armv6pmu_start;
        cpu_pmu->stop            = armv6pmu_stop;
        cpu_pmu->map_event       = armv6mpcore_map_event;
        cpu_pmu->num_events      = 3;
        return 0;
}
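
/*
 * For reference, a device tree node matching one of the compatible strings
 * below might look roughly like the following (the interrupt specifier is
 * board specific and purely illustrative):
 *
 *      pmu {
 *              compatible = "arm,arm1176-pmu";
 *              interrupts = <5>;
 *      };
 */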

static const struct of_device_id armv6_pmu_of_device_ids[] = {
        {.compatible = "arm,arm11mpcore-pmu",   .data = armv6mpcore_pmu_init},
        {.compatible = "arm,arm1176-pmu",       .data = armv6_1176_pmu_init},
        {.compatible = "arm,arm1136-pmu",       .data = armv6_1136_pmu_init},
        { /* sentinel value */ }
};

static const struct pmu_probe_info armv6_pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
        { /* sentinel value */ }
};

static int armv6_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
                                    armv6_pmu_probe_table);
}

static struct platform_driver armv6_pmu_driver = {
        .driver         = {
                .name   = "armv6-pmu",
                .of_match_table = armv6_pmu_of_device_ids,
        },
        .probe          = armv6_pmu_device_probe,
};

builtin_platform_driver(armv6_pmu_driver);
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */