power8-pmu.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 */

#define pr_fmt(fmt)     "power8-pmu: " fmt

#include "isa207-common.h"

/*
 * Some power8 event codes.
 */
#define EVENT(_name, _code)     _name = _code,

enum {
#include "power8-events-list.h"
};

#undef EVENT
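
/*
 * Editor's note: the EVENT() macro plus the #include of
 * power8-events-list.h above is an "X-macro" pattern. Each
 * EVENT(_name, _code) line in that list expands into an anonymous enum
 * member here, so the symbolic names used throughout this file
 * (PM_CYC, PM_LD_MISS_L1, ...) carry their raw event codes.
 */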

/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1       0x0000000040000000UL
#define POWER8_MMCRA_IFM2       0x0000000080000000UL
#define POWER8_MMCRA_IFM3       0x00000000C0000000UL
#define POWER8_MMCRA_BHRB_MASK  0x00000000C0000000UL
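
/*
 * Editor's note: the IFM (instruction filtering mode) bits live in
 * MMCRA[32:33] (see the layout comment below). power8_bhrb_filter_map()
 * maps the perf branch_sample_type flags onto these values to select
 * which branches the BHRB records.
 */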

/*
 * Raw event encoding for PowerISA v2.07 (Power8):
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                           [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |  |                                                              |
 *   | |  *- IFM (Linux)                 thresh start/stop OR FAB match -*
 *   | *- BHRB (Linux)
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                          |     |
 *     |        |           |                          |     *- mark
 *     |        |           *- L1/L2/L3 cache_sel      |
 *     |        |                                      |
 *     |        *- sampling mode for marked events     *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[x]   = combine (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *      # PM_MRK_FAB_RSP_MATCH
 *      MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *      # PM_MRK_FAB_RSP_MATCH_CYC
 *      MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else
 *      MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *      MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *      MMCRA[22:24] = thresh_cmp[0:2]
 *      MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *      MMCRC[53:55] = cache_sel[1:3]   (L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *      if cache_sel[0] == 0: # L3 bank
 *              MMCRC[47:49] = cache_sel[1:3]   (L3EVENT_SEL0)
 *      else if cache_sel[0] == 1:
 *              MMCRC[50:51] = cache_sel[2:3]   (L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *      MMCR1[16] = cache_sel[2]
 *      MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *      MMCRA[63]    = 1                (SAMPLE_ENABLE)
 *      MMCRA[57:59] = sample[0:2]      (RAND_SAMP_ELIG)
 *      MMCRA[61:62] = sample[3:4]      (RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *      MMCRA[32:33] = IFM
 *
 */
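
/*
 * Editor's note (illustrative, not part of the original file): userspace
 * can request one of these raw codes directly, e.g.
 * "perf stat -e rNNNN ..." where NNNN is an event code from
 * power8-events-list.h encoded in the layout above; the fields are then
 * decoded into the MMCR registers by isa207_compute_mmcr().
 */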

/* PowerISA v2.07 format attribute structure */
extern const struct attribute_group isa207_pmu_format_group;

/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
        { PM_MRK_ST_CMPL,               PM_MRK_ST_CMPL_ALT },
        { PM_BR_MRK_2PATH,              PM_BR_MRK_2PATH_ALT },
        { PM_L3_CO_MEPF,                PM_L3_CO_MEPF_ALT },
        { PM_MRK_DATA_FROM_L2MISS,      PM_MRK_DATA_FROM_L2MISS_ALT },
        { PM_CMPLU_STALL_ALT,           PM_CMPLU_STALL },
        { PM_BR_2PATH,                  PM_BR_2PATH_ALT },
        { PM_INST_DISP,                 PM_INST_DISP_ALT },
        { PM_RUN_CYC_ALT,               PM_RUN_CYC },
        { PM_MRK_FILT_MATCH,            PM_MRK_FILT_MATCH_ALT },
        { PM_LD_MISS_L1,                PM_LD_MISS_L1_ALT },
        { PM_RUN_INST_CMPL_ALT,         PM_RUN_INST_CMPL },
};
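
/*
 * power8_get_alternatives() fills alt[] with the event codes that are
 * equivalent to @event, using the shared ISA 2.07 helper and the
 * alternatives table above, and returns how many alternatives were found.
 */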
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
        int num_alt = 0;

        num_alt = isa207_get_alternatives(event, alt,
                                          ARRAY_SIZE(event_alternatives), flags,
                                          event_alternatives);

        return num_alt;
}

GENERIC_EVENT_ATTR(cpu-cycles,                  PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,     PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,      PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,                PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,         PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,               PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,            PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,                PM_LD_MISS_L1);
GENERIC_EVENT_ATTR(mem_access,                  MEM_ACCESS);

CACHE_EVENT_ATTR(L1-dcache-load-misses,         PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,               PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches,          PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,        PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,         PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,               PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,          PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses,               PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,                     PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,                PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,              PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,                    PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses,            PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,                  PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,              PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,              PM_ITLB_MISS);

static struct attribute *power8_events_attr[] = {
        GENERIC_EVENT_PTR(PM_CYC),
        GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
        GENERIC_EVENT_PTR(PM_CMPLU_STALL),
        GENERIC_EVENT_PTR(PM_INST_CMPL),
        GENERIC_EVENT_PTR(PM_BRU_FIN),
        GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
        GENERIC_EVENT_PTR(PM_LD_REF_L1),
        GENERIC_EVENT_PTR(PM_LD_MISS_L1),
        GENERIC_EVENT_PTR(MEM_ACCESS),
        CACHE_EVENT_PTR(PM_LD_MISS_L1),
        CACHE_EVENT_PTR(PM_LD_REF_L1),
        CACHE_EVENT_PTR(PM_L1_PREF),
        CACHE_EVENT_PTR(PM_ST_MISS_L1),
        CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
        CACHE_EVENT_PTR(PM_INST_FROM_L1),
        CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3),
        CACHE_EVENT_PTR(PM_L3_PREF_ALL),
        CACHE_EVENT_PTR(PM_L2_ST_MISS),
        CACHE_EVENT_PTR(PM_L2_ST),
        CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
        CACHE_EVENT_PTR(PM_BRU_FIN),
        CACHE_EVENT_PTR(PM_DTLB_MISS),
        CACHE_EVENT_PTR(PM_ITLB_MISS),
        NULL
};

static const struct attribute_group power8_pmu_events_group = {
        .name = "events",
        .attrs = power8_events_attr,
};

static struct attribute *power8_pmu_caps_attrs[] = {
        NULL
};

static struct attribute_group power8_pmu_caps_group = {
        .name  = "caps",
        .attrs = power8_pmu_caps_attrs,
};

static const struct attribute_group *power8_pmu_attr_groups[] = {
        &isa207_pmu_format_group,
        &power8_pmu_events_group,
        &power8_pmu_caps_group,
        NULL,
};
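
/*
 * Editor's note (illustrative, not part of the original file): these
 * attribute groups are exported through sysfs once the PMU is
 * registered, so the symbolic names above can be used directly from
 * userspace, e.g. "perf stat -e cpu/mem_access/ ..." on a POWER8
 * system, assuming the core PMU is registered under the usual "cpu"
 * event source.
 */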
static int power8_generic_events[] = {
        [PERF_COUNT_HW_CPU_CYCLES] =                    PM_CYC,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =       PM_GCT_NOSLOT_CYC,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =        PM_CMPLU_STALL,
        [PERF_COUNT_HW_INSTRUCTIONS] =                  PM_INST_CMPL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =           PM_BRU_FIN,
        [PERF_COUNT_HW_BRANCH_MISSES] =                 PM_BR_MPRED_CMPL,
        [PERF_COUNT_HW_CACHE_REFERENCES] =              PM_LD_REF_L1,
        [PERF_COUNT_HW_CACHE_MISSES] =                  PM_LD_MISS_L1,
};
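
/*
 * power8_bhrb_filter_map() translates the perf branch_sample_type flags
 * into the MMCRA IFM bits defined above. It returns 0 when all branches
 * should be recorded, an IFM value for filters the hardware supports,
 * and -1 for combinations POWER8 cannot filter in hardware.
 */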
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
        u64 pmu_bhrb_filter = 0;

        /* BHRB and regular PMU events share the same privilege state
         * filter configuration. BHRB is always recorded along with a
         * regular PMU event. As the privilege state filter is handled
         * in the basic PMC configuration of the accompanying regular
         * PMU event, we ignore any separate BHRB specific request.
         */

        /* No branch filter requested */
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
                return pmu_bhrb_filter;

        /* Invalid branch filter options - HW does not support */
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
                pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
                return pmu_bhrb_filter;
        }

        /* Everything else is unsupported */
        return -1;
}
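
/*
 * power8_config_bhrb() applies the filter computed above by masking it
 * to the MMCRA IFM bits and OR-ing it into the current MMCRA value.
 */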
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
        pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;

        /* Enable BHRB filter in PMU */
        mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)    PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [ C(L1D) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
                        [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L1_PREF,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(L1I) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
                        [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(LL) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
                        [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L2_ST,
                        [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(DTLB) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_DTLB_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(ITLB) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_ITLB_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(BPU) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
                        [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(NODE) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
};

#undef C
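
/*
 * Top-level PMU description handed to register_power_pmu(). Most of the
 * heavy lifting (constraint checking, MMCR computation, PMC disabling)
 * is delegated to the shared ISA 2.07 helpers declared in
 * isa207-common.h.
 */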
static struct power_pmu power8_pmu = {
        .name                   = "POWER8",
        .n_counter              = MAX_PMU_COUNTERS,
        .max_alternatives       = MAX_ALT + 1,
        .add_fields             = ISA207_ADD_FIELDS,
        .test_adder             = ISA207_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power8_config_bhrb,
        .bhrb_filter_map        = power8_bhrb_filter_map,
        .get_constraint         = isa207_get_constraint,
        .get_alternatives       = power8_get_alternatives,
        .get_mem_data_src       = isa207_get_mem_data_src,
        .get_mem_weight         = isa207_get_mem_weight,
        .disable_pmc            = isa207_disable_pmc,
        .flags                  = PPMU_HAS_SIER | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power8_generic_events),
        .generic_events         = power8_generic_events,
        .cache_events           = &power8_cache_events,
        .attr_groups            = power8_pmu_attr_groups,
        .bhrb_nr                = 32,
};
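
/*
 * init_power8_pmu() checks the PVR so this driver only binds to POWER8,
 * POWER8E and POWER8NVL parts, registers the PMU, and advertises EBB
 * support to userspace. The pr_info() below only notes that the
 * PMAO-bug restore workaround is in effect on affected hardware.
 */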
int __init init_power8_pmu(void)
{
        int rc;
        unsigned int pvr = mfspr(SPRN_PVR);

        if (PVR_VER(pvr) != PVR_POWER8E && PVR_VER(pvr) != PVR_POWER8NVL &&
            PVR_VER(pvr) != PVR_POWER8)
                return -ENODEV;

        rc = register_power_pmu(&power8_pmu);
        if (rc)
                return rc;

        /* Tell userspace that EBB is supported */
        cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

        if (cpu_has_feature(CPU_FTR_PMAO_BUG))
                pr_info("PMAO restore workaround active.\n");

        return 0;
}