// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for POWER9 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 */

#define pr_fmt(fmt)	"power9-pmu: " fmt

#include "isa207-common.h"

/*
 * Raw event encoding for Power9:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                       [ ] [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |  |                         |                                     |
 *   | |  *- IFM (Linux)            |                  thresh start/stop -*
 *   | *- BHRB (Linux)              *sm
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   []   m   [    pmcxsel    ]
 *     |        |           |                           |    |
 *     |        |           |                           |    *- mark
 *     |        |           *- L1/L2/L3 cache_sel       |
 *     |        |                                       |
 *     |        *- sampling mode for marked events      *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y]   = unit      (PMCxUNIT)
 * MMCR1[24]    = pmc1combine[0]
 * MMCR1[25]    = pmc1combine[1]
 * MMCR1[26]    = pmc2combine[0]
 * MMCR1[27]    = pmc2combine[1]
 * MMCR1[28]    = pmc3combine[0]
 * MMCR1[29]    = pmc3combine[1]
 * MMCR1[30]    = pmc4combine[0]
 * MMCR1[31]    = pmc4combine[1]
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	MMCR1[20:27] = thresh_ctl
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	MMCR1[20:27] = thresh_ctl
 * else
 *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[9:11]  = thresh_cmp[0:2]
 *	MMCRA[12:18] = thresh_cmp[3:9]
 *
 * MMCR1[16] = cache_sel[2]
 * MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 * MMCRA[SDAR_MODE] = sm
 */
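
/*
 * Illustrative only (not part of the driver): given the config bit ranges
 * exported by the format attributes later in this file (pmcxsel config:0-7,
 * mark config:8, combine config:10-11, unit config:12-15, pmc config:16-19,
 * cache_sel config:20-23, ...), a raw event could be assembled field by
 * field, e.g.:
 *
 *	config = pmcxsel
 *		 | (mark      << 8)
 *		 | (combine   << 10)
 *		 | (unit      << 12)
 *		 | (pmc       << 16)
 *		 | (cache_sel << 20);
 *
 * isa207_compute_mmcr() then turns those fields into the MMCR1/MMCRA
 * settings described above.
 */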

/*
 * Some power9 event codes.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power9-events-list.h"
};

#undef EVENT

/* MMCRA IFM bits - POWER9 */
#define POWER9_MMCRA_IFM1		0x0000000040000000UL
#define POWER9_MMCRA_IFM2		0x0000000080000000UL
#define POWER9_MMCRA_IFM3		0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK		0x00000000C0000000UL

extern u64 PERF_REG_EXTENDED_MASK;

/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS		0x00002000

/* PowerISA v2.07 format attribute structure */
extern const struct attribute_group isa207_pmu_format_group;
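
/* Events blacklisted on POWER9 DD2.1 (non-Cumulus parts); see init_power9_pmu(). */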
static int p9_dd21_bl_ev[] = {
	PM_MRK_ST_DONE_L2,
	PM_RADIX_PWC_L1_HIT,
	PM_FLOP_CMPL,
	PM_MRK_NTF_FIN,
	PM_RADIX_PWC_L2_HIT,
	PM_IFETCH_THROTTLE,
	PM_MRK_L2_TM_ST_ABORT_SISTER,
	PM_RADIX_PWC_L3_HIT,
	PM_RUN_CYC_SMT2_MODE,
	PM_TM_TX_PASS_RUN_INST,
	PM_DISP_HELD_SYNC_HOLD,
};
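
/* Events blacklisted on POWER9 DD2.2 (non-Cumulus parts); see init_power9_pmu(). */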
static int p9_dd22_bl_ev[] = {
	PM_DTLB_MISS_16G,
	PM_DERAT_MISS_2M,
	PM_DTLB_MISS_2M,
	PM_MRK_DTLB_MISS_1G,
	PM_DTLB_MISS_4K,
	PM_DERAT_MISS_1G,
	PM_MRK_DERAT_MISS_2M,
	PM_MRK_DTLB_MISS_4K,
	PM_MRK_DTLB_MISS_16G,
	PM_DTLB_MISS_64K,
	PM_MRK_DERAT_MISS_1G,
	PM_MRK_DTLB_MISS_64K,
	PM_DISP_HELD_SYNC_HOLD,
	PM_DTLB_MISS_16M,
	PM_DTLB_MISS_1G,
	PM_MRK_DTLB_MISS_16M,
};

/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};

static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt,
					  ARRAY_SIZE(power9_event_alternatives), flags,
					  power9_event_alternatives);

	return num_alt;
}
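
/*
 * Sanity-check a raw event: a sampling-mode field of 0xC is not a valid
 * setting here, and the common ISA v3.0 attribute checks must also pass.
 */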
static int power9_check_attr_config(struct perf_event *ev)
{
	u64 val;
	u64 event = ev->attr.config;

	val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
	if (val == 0xC || isa3XX_check_attr_config(ev))
		return -EINVAL;

	return 0;
}

GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1_FIN);
GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS);
GENERIC_EVENT_ATTR(mem-stores, MEM_STORES);

CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);

static struct attribute *power9_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BR_CMPL),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
	GENERIC_EVENT_PTR(MEM_LOADS),
	GENERIC_EVENT_PTR(MEM_STORES),
	CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BR_CMPL),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static const struct attribute_group power9_pmu_events_group = {
	.name = "events",
	.attrs = power9_events_attr,
};

PMU_FORMAT_ATTR(event,		"config:0-51");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:10-11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
PMU_FORMAT_ATTR(sdar_mode,	"config:50-51");
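
/*
 * The format attributes above are exported through the PMU's sysfs "format"
 * directory, so tools such as perf can compose raw events field by field,
 * e.g. (illustrative) perf stat -e 'cpu/event=0x.../'.
 */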

static struct attribute *power9_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	&format_attr_sdar_mode.attr,
	NULL,
};

static const struct attribute_group power9_pmu_format_group = {
	.name = "format",
	.attrs = power9_pmu_format_attr,
};

static struct attribute *power9_pmu_caps_attrs[] = {
	NULL
};

static struct attribute_group power9_pmu_caps_group = {
	.name = "caps",
	.attrs = power9_pmu_caps_attrs,
};

static const struct attribute_group *power9_pmu_attr_groups[] = {
	&power9_pmu_format_group,
	&power9_pmu_events_group,
	&power9_pmu_caps_group,
	NULL,
};

static int power9_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_ICT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BR_CMPL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1_FIN,
};

static u64 power9_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/*
	 * BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB-specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER9_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}

static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
	pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;

	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS)   ] = PM_LD_MISS_L1_FIN,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS)   ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS)   ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BR_CMPL,
			[ C(RESULT_MISS)   ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};

#undef C

static struct power_pmu power9_pmu = {
	.name			= "POWER9",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.group_constraint_mask	= CNST_CACHE_PMC4_MASK,
	.group_constraint_val	= CNST_CACHE_PMC4_VAL,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power9_config_bhrb,
	.bhrb_filter_map	= power9_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power9_get_alternatives,
	.get_mem_data_src	= isa207_get_mem_data_src,
	.get_mem_weight		= isa207_get_mem_weight,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power9_generic_events),
	.generic_events		= power9_generic_events,
	.cache_events		= &power9_cache_events,
	.attr_groups		= power9_pmu_attr_groups,
	.bhrb_nr		= 32,
	.capabilities		= PERF_PMU_CAP_EXTENDED_REGS,
	.check_attr_config	= power9_check_attr_config,
};
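
/*
 * Probe and register the POWER9 PMU: bail out on non-POWER9 parts, apply the
 * per-revision event blacklists on non-Cumulus DD2.1/DD2.2 chips, set up the
 * extended-regs mask, and advertise EBB support to userspace.
 */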
int __init init_power9_pmu(void)
{
	int rc = 0;
	unsigned int pvr = mfspr(SPRN_PVR);

	if (PVR_VER(pvr) != PVR_POWER9)
		return -ENODEV;

	/* Blacklist events */
	if (!(pvr & PVR_POWER9_CUMULUS)) {
		if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 1)) {
			power9_pmu.blacklist_ev = p9_dd21_bl_ev;
			power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd21_bl_ev);
		} else if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 2)) {
			power9_pmu.blacklist_ev = p9_dd22_bl_ev;
			power9_pmu.n_blacklist_ev = ARRAY_SIZE(p9_dd22_bl_ev);
		}
	}

	/* Set the PERF_REG_EXTENDED_MASK here */
	PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300;

	rc = register_power_pmu(&power9_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}