generic-compat-pmu.c

// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2019 Madhavan Srinivasan, IBM Corporation.

#define pr_fmt(fmt)	"generic-compat-pmu: " fmt

#include "isa207-common.h"

/*
 * Raw event encoding:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *                                 [ pmc ]                       [    pmcxsel    ]
 */
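/*
 * For example, the raw code 0x200f0 (PM_ST_CMPL below) decodes as
 * pmc = 2 (bits 16-19) and pmcxsel = 0xf0 (bits 0-7), i.e. event
 * select 0xf0 counted on PMC2.
 */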
/*
 * Event codes defined in ISA v3.0B
 */
#define EVENT(_name, _code)	_name = _code,

enum {
	/* Cycles, alternate code */
	EVENT(PM_CYC_ALT,		0x100f0)
	/* One or more instructions completed in a cycle */
	EVENT(PM_CYC_INST_CMPL,		0x100f2)
	/* Floating-point instruction completed */
	EVENT(PM_FLOP_CMPL,		0x100f4)
	/* Instruction ERAT/L1-TLB miss */
	EVENT(PM_L1_ITLB_MISS,		0x100f6)
	/* All instructions completed and none available */
	EVENT(PM_NO_INST_AVAIL,		0x100f8)
	/* A load-type instruction completed (ISA v3.0+) */
	EVENT(PM_LD_CMPL,		0x100fc)
	/* Instruction completed, alternate code (ISA v3.0+) */
	EVENT(PM_INST_CMPL_ALT,		0x100fe)
	/* A store-type instruction completed */
	EVENT(PM_ST_CMPL,		0x200f0)
	/* Instruction Dispatched */
	EVENT(PM_INST_DISP,		0x200f2)
	/* Run_cycles */
	EVENT(PM_RUN_CYC,		0x200f4)
	/* Data ERAT/L1-TLB miss/reload */
	EVENT(PM_L1_DTLB_RELOAD,	0x200f6)
	/* Taken branch completed */
	EVENT(PM_BR_TAKEN_CMPL,		0x200fa)
	/* Demand iCache Miss */
	EVENT(PM_L1_ICACHE_MISS,	0x200fc)
	/* L1 Dcache reload from memory */
	EVENT(PM_L1_RELOAD_FROM_MEM,	0x200fe)
	/* L1 Dcache store miss */
	EVENT(PM_ST_MISS_L1,		0x300f0)
	/* Alternate code for PM_INST_DISP */
	EVENT(PM_INST_DISP_ALT,		0x300f2)
	/* Branch direction or target mispredicted */
	EVENT(PM_BR_MISPREDICT,		0x300f6)
	/* Data TLB miss/reload */
	EVENT(PM_DTLB_MISS,		0x300fc)
	/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
	EVENT(PM_DATA_FROM_L3MISS,	0x300fe)
	/* L1 Dcache load miss */
	EVENT(PM_LD_MISS_L1,		0x400f0)
	/* Cycle when instruction(s) dispatched */
	EVENT(PM_CYC_INST_DISP,		0x400f2)
	/* Branch or branch target mispredicted */
	EVENT(PM_BR_MPRED_CMPL,		0x400f6)
	/* Instructions completed with run latch set */
	EVENT(PM_RUN_INST_CMPL,		0x400fa)
	/* Instruction TLB miss/reload */
	EVENT(PM_ITLB_MISS,		0x400fc)
	/* Load data not cached */
	EVENT(PM_LD_NOT_CACHED,		0x400fe)
	/* Instructions */
	EVENT(PM_INST_CMPL,		0x500fa)
	/* Cycles */
	EVENT(PM_CYC,			0x600f4)
};

#undef EVENT

/*
 * Table of alternatives, sorted in increasing order of column 0.
 * Note that in each row, column 0 must be the smallest.
 */
static const unsigned int generic_event_alternatives[][MAX_ALT] = {
	{ PM_CYC_ALT,		PM_CYC },
	{ PM_INST_CMPL_ALT,	PM_INST_CMPL },
	{ PM_INST_DISP,		PM_INST_DISP_ALT },
};
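/*
 * Each row pairs codes that count the same thing on different PMCs,
 * e.g. PM_CYC_ALT (0x100f0, PMC1) and PM_CYC (0x600f4, PMC6): if one
 * counter is already taken, the event scheduler can use the other.
 */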
static int generic_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt,
					  ARRAY_SIZE(generic_event_alternatives),
					  flags, generic_event_alternatives);

	return num_alt;
}
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_NO_INST_AVAIL);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);

static struct attribute *generic_compat_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_NO_INST_AVAIL),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static const struct attribute_group generic_compat_pmu_events_group = {
	.name = "events",
	.attrs = generic_compat_events_attr,
};

PMU_FORMAT_ATTR(event,	 "config:0-19");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(pmc,	 "config:16-19");

static struct attribute *generic_compat_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_pmc.attr,
	NULL,
};

static const struct attribute_group generic_compat_pmu_format_group = {
	.name = "format",
	.attrs = generic_compat_pmu_format_attr,
};
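/*
 * The events and format groups are exported via sysfs (under
 * /sys/bus/event_source/devices/<pmu>/), so userspace can program raw
 * codes directly; assuming the PMU registers under the usual "cpu"
 * name, e.g.:
 *
 *   perf stat -e cpu/event=0x200f0/		# PM_ST_CMPL, full code
 *   perf stat -e cpu/pmc=2,pmcxsel=0xf0/	# same event, by field
 */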
static struct attribute *generic_compat_pmu_caps_attrs[] = {
	NULL
};

static struct attribute_group generic_compat_pmu_caps_group = {
	.name = "caps",
	.attrs = generic_compat_pmu_caps_attrs,
};

static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
	&generic_compat_pmu_format_group,
	&generic_compat_pmu_events_group,
	&generic_compat_pmu_caps_group,
	NULL,
};

static int compat_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_NO_INST_AVAIL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
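/*
 * These back perf's generic hardware events, so a plain
 * "perf stat -e cycles,instructions,branch-misses" works on an
 * otherwise unrecognized ISA v3.0 core.
 */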
#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};

#undef C
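/*
 * Example lookup: "perf stat -e L1-dcache-load-misses" resolves to
 * [L1D][OP_READ][RESULT_MISS] above, i.e. PM_LD_MISS_L1 (0x400f0).
 */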
/*
 * We set MMCR0[CC56RUN] so we can use counters 5 and 6 for
 * PM_INST_CMPL and PM_CYC.
 */
static int generic_compute_mmcr(u64 event[], int n_ev,
				unsigned int hwc[], struct mmcr_regs *mmcr,
				struct perf_event *pevents[], u32 flags)
{
	int ret;

	ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
	if (!ret)
		mmcr->mmcr0 |= MMCR0_C56RUN;

	return ret;
}
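/*
 * PMC5 and PMC6 are the fixed-function instruction/cycle counters and
 * normally freeze while the run latch is clear; CC56RUN makes them
 * count unconditionally, so PM_INST_CMPL (PMC5) and PM_CYC (PMC6)
 * behave like ordinary events here.
 */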
static struct power_pmu generic_compat_pmu = {
	.name			= "ISAv3",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= generic_compute_mmcr,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= generic_get_alternatives,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(compat_generic_events),
	.generic_events		= compat_generic_events,
	.cache_events		= &generic_compat_cache_events,
	.attr_groups		= generic_compat_pmu_attr_groups,
};
int __init init_generic_compat_pmu(void)
{
	int rc = 0;

	/*
	 * From ISA v2.07 on, PMU features are architected;
	 * we require >= v3.0 because
	 * (a) that has PM_LD_CMPL and PM_INST_CMPL_ALT, which v2.07
	 *     doesn't have, and
	 * (b) we don't expect any non-IBM Power ISA implementations
	 *     that conform to v2.07 but not v3.0.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;

	rc = register_power_pmu(&generic_compat_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}