power6-pmu.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for POWER6 processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>

#include "internal.h"

/*
 * Bits in event code for POWER6
 */
#define PM_PMC_SH       20      /* PMC number (1-based) for direct events */
#define PM_PMC_MSK      0x7
#define PM_PMC_MSKS     (PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH      16      /* Unit the event comes from (TTMxSEL encoding) */
#define PM_UNIT_MSK     0xf
#define PM_UNIT_MSKS    (PM_UNIT_MSK << PM_UNIT_SH)
#define PM_LLAV         0x8000  /* Load lookahead match value */
#define PM_LLA          0x4000  /* Load lookahead match enable */
#define PM_BYTE_SH      12      /* Byte of event bus to use */
#define PM_BYTE_MSK     3
#define PM_SUBUNIT_SH   8       /* Subunit the event comes from (NEST_SEL encoding) */
#define PM_SUBUNIT_MSK  7
#define PM_SUBUNIT_MSKS (PM_SUBUNIT_MSK << PM_SUBUNIT_SH)
#define PM_PMCSEL_MSK   0xff    /* PMCxSEL value */
#define PM_BUSEVENT_MSK 0xf3700
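
/*
 * Example decode (illustrative): event code 0x2d0030, listed below in the
 * alternatives table as PM_MRK_FPU_FIN, splits into
 *      pmc  = (0x2d0030 >> PM_PMC_SH) & PM_PMC_MSK   = 2
 *      unit = (0x2d0030 >> PM_UNIT_SH) & PM_UNIT_MSK = 0xd (BFP set 2)
 *      byte = (0x2d0030 >> PM_BYTE_SH) & PM_BYTE_MSK = 0
 *      psel = 0x2d0030 & PM_PMCSEL_MSK               = 0x30
 * and 0x2d0030 & PM_BUSEVENT_MSK is non-zero, so it is an event-bus event.
 */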

/*
 * Bits in MMCR1 for POWER6
 */
#define MMCR1_TTM0SEL_SH        60
#define MMCR1_TTMSEL_SH(n)      (MMCR1_TTM0SEL_SH - (n) * 4)
#define MMCR1_TTMSEL_MSK        0xf
#define MMCR1_TTMSEL(m, n)      (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK)
#define MMCR1_NESTSEL_SH        45
#define MMCR1_NESTSEL_MSK       0x7
#define MMCR1_NESTSEL(m)        (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
#define MMCR1_PMC1_LLA          (1ul << 44)
#define MMCR1_PMC1_LLA_VALUE    (1ul << 39)
#define MMCR1_PMC1_ADDR_SEL     (1ul << 35)
#define MMCR1_PMC1SEL_SH        24
#define MMCR1_PMCSEL_SH(n)      (MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK        0xff

/*
 * Map of which direct events on which PMCs are marked instruction events.
 * Indexed by PMCSEL value >> 1.
 * Bottom 4 bits are a map of which PMCs are interesting,
 * top 4 bits say what sort of event:
 *      0 = direct marked event,
 *      1 = byte decode event,
 *      4 = add/and event (PMC1 -> bits 0 & 4),
 *      5 = add/and event (PMC1 -> bits 1 & 5),
 *      6 = add/and event (PMC1 -> bits 2 & 6),
 *      7 = add/and event (PMC1 -> bits 3 & 7).
 */
static unsigned char direct_event_is_marked[0x60 >> 1] = {
        0,      /* 00 */
        0,      /* 02 */
        0,      /* 04 */
        0x07,   /* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
        0x04,   /* 08 PM_MRK_DFU_FIN */
        0x06,   /* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */
        0,      /* 0c */
        0,      /* 0e */
        0x02,   /* 10 PM_MRK_INST_DISP */
        0x08,   /* 12 PM_MRK_LSU_DERAT_MISS */
        0,      /* 14 */
        0,      /* 16 */
        0x0c,   /* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */
        0x0f,   /* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */
        0x01,   /* 1c PM_MRK_INST_ISSUED */
        0,      /* 1e */
        0,      /* 20 */
        0,      /* 22 */
        0,      /* 24 */
        0,      /* 26 */
        0x15,   /* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */
        0,      /* 2a */
        0,      /* 2c */
        0,      /* 2e */
        0x4f,   /* 30 */
        0x7f,   /* 32 */
        0x4f,   /* 34 */
        0x5f,   /* 36 */
        0x6f,   /* 38 */
        0x4f,   /* 3a */
        0,      /* 3c */
        0x08,   /* 3e PM_MRK_INST_TIMEO */
        0x1f,   /* 40 */
        0x1f,   /* 42 */
        0x1f,   /* 44 */
        0x1f,   /* 46 */
        0x1f,   /* 48 */
        0x1f,   /* 4a */
        0x1f,   /* 4c */
        0x1f,   /* 4e */
        0,      /* 50 */
        0x05,   /* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */
        0x1c,   /* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */
        0x02,   /* 56 PM_MRK_LD_MISS_L1 */
        0,      /* 58 */
        0,      /* 5a */
        0,      /* 5c */
        0,      /* 5e */
};
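
/*
 * Worked example (illustrative): PMCSEL 0x30 indexes entry 0x30 >> 1 = 0x18
 * above, which holds 0x4f.  The bottom nibble 0xf says PMCs 1-4 are
 * interesting; the top nibble 4 means an add/and event, so the event-bus bit
 * to test is 4 ^ (pmc - 1), e.g. bit 5 when the event is counted on PMC2.
 */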

/*
 * Masks showing for each unit which bits are marked events.
 * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0.
 */
static u32 marked_bus_events[16] = {
        0x01000000,     /* direct events set 1: byte 3 bit 0 */
        0x00010000,     /* direct events set 2: byte 2 bit 0 */
        0, 0, 0, 0,     /* IDU, IFU, nest: nothing */
        0x00000088,     /* VMX set 1: byte 0 bits 3, 7 */
        0x000000c0,     /* VMX set 2: byte 0 bits 6, 7 */
        0x04010000,     /* LSU set 1: byte 2 bit 0, byte 3 bit 2 */
        0xff010000u,    /* LSU set 2: byte 2 bit 0, all of byte 3 */
        0,              /* LSU set 3 */
        0x00000010,     /* VMX set 3: byte 0 bit 4 */
        0,              /* BFP set 1 */
        0x00000022,     /* BFP set 2: byte 0 bits 1, 5 */
        0, 0
};

/*
 * Returns 1 if event counts things relating to marked instructions
 * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
 */
static int power6_marked_instr_event(u64 event)
{
        int pmc, psel, ptype;
        int bit, byte, unit;
        u32 mask;

        pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
        psel = (event & PM_PMCSEL_MSK) >> 1;    /* drop edge/level bit */
        if (pmc >= 5)
                return 0;

        bit = -1;
        if (psel < sizeof(direct_event_is_marked)) {
                ptype = direct_event_is_marked[psel];
                if (pmc == 0 || !(ptype & (1 << (pmc - 1))))
                        return 0;
                ptype >>= 4;
                if (ptype == 0)
                        return 1;
                if (ptype == 1)
                        bit = 0;
                else
                        bit = ptype ^ (pmc - 1);
        } else if ((psel & 0x48) == 0x40)
                bit = psel & 7;

        if (!(event & PM_BUSEVENT_MSK) || bit == -1)
                return 0;

        byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
        unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
        mask = marked_bus_events[unit];
        return (mask >> (byte * 8 + bit)) & 1;
}
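
/*
 * Continuing the 0x2d0030 (PM_MRK_FPU_FIN) example from above: pmc = 2 and
 * psel = 0x30 >> 1 = 0x18, so direct_event_is_marked[0x18] = 0x4f gives
 * bit = 4 ^ (2 - 1) = 5.  With unit = 0xd and byte = 0, the final test is
 * (marked_bus_events[0xd] >> 5) & 1 = (0x22 >> 5) & 1 = 1, so the event is
 * marked and p6_compute_mmcr() below will set MMCRA_SAMPLE_ENABLE for it.
 */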

/*
 * Assign PMC numbers and compute MMCR1 value for a set of events
 */
static int p6_compute_mmcr(u64 event[], int n_ev,
                           unsigned int hwc[], struct mmcr_regs *mmcr,
                           struct perf_event *pevents[],
                           u32 flags __maybe_unused)
{
        unsigned long mmcr1 = 0;
        unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
        int i;
        unsigned int pmc, ev, b, u, s, psel;
        unsigned int ttmset = 0;
        unsigned int pmc_inuse = 0;

        if (n_ev > 6)
                return -1;
        for (i = 0; i < n_ev; ++i) {
                pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
                if (pmc) {
                        if (pmc_inuse & (1 << (pmc - 1)))
                                return -1;      /* collision! */
                        pmc_inuse |= 1 << (pmc - 1);
                }
        }
        for (i = 0; i < n_ev; ++i) {
                ev = event[i];
                pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
                if (pmc) {
                        --pmc;
                } else {
                        /* can go on any PMC; find a free one */
                        for (pmc = 0; pmc < 4; ++pmc)
                                if (!(pmc_inuse & (1 << pmc)))
                                        break;
                        if (pmc >= 4)
                                return -1;
                        pmc_inuse |= 1 << pmc;
                }
                hwc[i] = pmc;
                psel = ev & PM_PMCSEL_MSK;
                if (ev & PM_BUSEVENT_MSK) {
                        /* this event uses the event bus */
                        b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK;
                        u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK;
                        /* check for conflict on this byte of event bus */
                        if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
                                return -1;
                        mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b);
                        ttmset |= 1 << b;
                        if (u == 5) {
                                /* Nest events have a further mux */
                                s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
                                if ((ttmset & 0x10) &&
                                    MMCR1_NESTSEL(mmcr1) != s)
                                        return -1;
                                ttmset |= 0x10;
                                mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH;
                        }
                        if (0x30 <= psel && psel <= 0x3d) {
                                /* these need the PMCx_ADDR_SEL bits */
                                if (b >= 2)
                                        mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc;
                        }
                        /* bus select values are different for PMC3/4 */
                        if (pmc >= 2 && (psel & 0x90) == 0x80)
                                psel ^= 0x20;
                }
                if (ev & PM_LLA) {
                        mmcr1 |= MMCR1_PMC1_LLA >> pmc;
                        if (ev & PM_LLAV)
                                mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc;
                }
                if (power6_marked_instr_event(event[i]))
                        mmcra |= MMCRA_SAMPLE_ENABLE;
                if (pmc < 4)
                        mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
        }
        mmcr->mmcr0 = 0;
        if (pmc_inuse & 1)
                mmcr->mmcr0 = MMCR0_PMC1CE;
        if (pmc_inuse & 0xe)
                mmcr->mmcr0 |= MMCR0_PMCjCE;
        mmcr->mmcr1 = mmcr1;
        mmcr->mmcra = mmcra;
        return 0;
}
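
/*
 * For example, with the single generic event 0x1e (PM_CYC), which encodes no
 * PMC and is not a bus event, the loop above picks the first free counter
 * (PMC1, pmc = 0) and places psel = 0x1e in the PMC1SEL field, i.e.
 * mmcr1 = 0x1eUL << MMCR1_PMCSEL_SH(0) = 0x1e000000.  MMCR0 then gets
 * MMCR0_PMC1CE because only PMC1 is in use.
 */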

/*
 * Layout of constraint bits:
 *
 *      0-1     add field: number of uses of PMC1 (max 1)
 *      2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6
 *      12-15   add field: number of uses of PMC1-4 (max 4)
 *      16-19   select field: unit on byte 0 of event bus
 *      20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
 *      32-34   select field: nest (subunit) event selector
 */
static int p6_get_constraint(u64 event, unsigned long *maskp,
                             unsigned long *valp, u64 event_config1 __maybe_unused)
{
        int pmc, byte, sh, subunit;
        unsigned long mask = 0, value = 0;

        pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
        if (pmc) {
                if (pmc > 4 && !(event == 0x500009 || event == 0x600005))
                        return -1;
                sh = (pmc - 1) * 2;
                mask |= 2 << sh;
                value |= 1 << sh;
        }
        if (event & PM_BUSEVENT_MSK) {
                byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
                sh = byte * 4 + (16 - PM_UNIT_SH);
                mask |= PM_UNIT_MSKS << sh;
                value |= (unsigned long)(event & PM_UNIT_MSKS) << sh;
                if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
                        subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
                        mask |= (unsigned long)PM_SUBUNIT_MSK << 32;
                        value |= (unsigned long)subunit << 32;
                }
        }
        if (pmc <= 4) {
                mask |= 0x8000;         /* add field for count of PMC1-4 uses */
                value |= 0x1000;
        }
        *maskp = mask;
        *valp = value;
        return 0;
}
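
/*
 * For example, event 0x2d0030 (PM_MRK_FPU_FIN on PMC2) yields
 * mask = 0x000f8008 and value = 0x000d1004: the add field at bits 2-3
 * records one use of PMC2, the field at bits 12-15 records one use of a
 * PMC1-4 counter, and the select field at bits 16-19 pins byte 0 of the
 * event bus to unit 0xd.
 */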

static int p6_limited_pmc_event(u64 event)
{
        int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;

        return pmc == 5 || pmc == 6;
}

#define MAX_ALT 4       /* at most 4 alternatives for any event */

static const unsigned int event_alternatives[][MAX_ALT] = {
        { 0x0130e8, 0x2000f6, 0x3000fc },               /* PM_PTEG_RELOAD_VALID */
        { 0x080080, 0x10000d, 0x30000c, 0x4000f0 },     /* PM_LD_MISS_L1 */
        { 0x080088, 0x200054, 0x3000f0 },               /* PM_ST_MISS_L1 */
        { 0x10000a, 0x2000f4, 0x600005 },               /* PM_RUN_CYC */
        { 0x10000b, 0x2000f5 },                         /* PM_RUN_COUNT */
        { 0x10000e, 0x400010 },                         /* PM_PURR */
        { 0x100010, 0x4000f8 },                         /* PM_FLUSH */
        { 0x10001a, 0x200010 },                         /* PM_MRK_INST_DISP */
        { 0x100026, 0x3000f8 },                         /* PM_TB_BIT_TRANS */
        { 0x100054, 0x2000f0 },                         /* PM_ST_FIN */
        { 0x100056, 0x2000fc },                         /* PM_L1_ICACHE_MISS */
        { 0x1000f0, 0x40000a },                         /* PM_INST_IMC_MATCH_CMPL */
        { 0x1000f8, 0x200008 },                         /* PM_GCT_EMPTY_CYC */
        { 0x1000fc, 0x400006 },                         /* PM_LSU_DERAT_MISS_CYC */
        { 0x20000e, 0x400007 },                         /* PM_LSU_DERAT_MISS */
        { 0x200012, 0x300012 },                         /* PM_INST_DISP */
        { 0x2000f2, 0x3000f2 },                         /* PM_INST_DISP */
        { 0x2000f8, 0x300010 },                         /* PM_EXT_INT */
        { 0x2000fe, 0x300056 },                         /* PM_DATA_FROM_L2MISS */
        { 0x2d0030, 0x30001a },                         /* PM_MRK_FPU_FIN */
        { 0x30000a, 0x400018 },                         /* PM_MRK_INST_FIN */
        { 0x3000f6, 0x40000e },                         /* PM_L1_DCACHE_RELOAD_VALID */
        { 0x3000fe, 0x400056 },                         /* PM_DATA_FROM_L3MISS */
};

/*
 * This could be made more efficient with a binary search on
 * a presorted list, if necessary
 */
static int find_alternatives_list(u64 event)
{
        int i, j;
        unsigned int alt;

        for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
                if (event < event_alternatives[i][0])
                        return -1;
                for (j = 0; j < MAX_ALT; ++j) {
                        alt = event_alternatives[i][j];
                        if (!alt || event < alt)
                                break;
                        if (event == alt)
                                return i;
                }
        }
        return -1;
}

static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
        int i, j, nlim;
        unsigned int psel, pmc;
        unsigned int nalt = 1;
        u64 aevent;

        alt[0] = event;
        nlim = p6_limited_pmc_event(event);

        /* check the alternatives table */
        i = find_alternatives_list(event);
        if (i >= 0) {
                /* copy out alternatives from list */
                for (j = 0; j < MAX_ALT; ++j) {
                        aevent = event_alternatives[i][j];
                        if (!aevent)
                                break;
                        if (aevent != event)
                                alt[nalt++] = aevent;
                        nlim += p6_limited_pmc_event(aevent);
                }
        } else {
                /* Check for alternative ways of computing sum events */
                /* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */
                psel = event & (PM_PMCSEL_MSK & ~1);    /* ignore edge bit */
                pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
                if (pmc && (psel == 0x32 || psel == 0x34))
                        alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) |
                                      ((5 - pmc) << PM_PMC_SH);

                /* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */
                if (pmc && (psel == 0x38 || psel == 0x3a))
                        alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) |
                                      ((pmc > 2 ? pmc - 2 : pmc + 2) << PM_PMC_SH);
        }

        if (flags & PPMU_ONLY_COUNT_RUN) {
                /*
                 * We're only counting in RUN state,
                 * so PM_CYC is equivalent to PM_RUN_CYC,
                 * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR.
                 * This doesn't include alternatives that don't provide
                 * any extra flexibility in assigning PMCs (e.g.
                 * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC).
                 * Note that even with these additional alternatives
                 * we never end up with more than 4 alternatives for any event.
                 */
                j = nalt;
                for (i = 0; i < nalt; ++i) {
                        switch (alt[i]) {
                        case 0x1e:      /* PM_CYC */
                                alt[j++] = 0x600005;    /* PM_RUN_CYC */
                                ++nlim;
                                break;
                        case 0x10000a:  /* PM_RUN_CYC */
                                alt[j++] = 0x1e;        /* PM_CYC */
                                break;
                        case 2:         /* PM_INST_CMPL */
                                alt[j++] = 0x500009;    /* PM_RUN_INST_CMPL */
                                ++nlim;
                                break;
                        case 0x500009:  /* PM_RUN_INST_CMPL */
                                alt[j++] = 2;           /* PM_INST_CMPL */
                                break;
                        case 0x10000e:  /* PM_PURR */
                                alt[j++] = 0x4000f4;    /* PM_RUN_PURR */
                                break;
                        case 0x4000f4:  /* PM_RUN_PURR */
                                alt[j++] = 0x10000e;    /* PM_PURR */
                                break;
                        }
                }
                nalt = j;
        }

        if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
                /* remove the limited PMC events */
                j = 0;
                for (i = 0; i < nalt; ++i) {
                        if (!p6_limited_pmc_event(alt[i])) {
                                alt[j] = alt[i];
                                ++j;
                        }
                }
                nalt = j;
        } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
                /* remove all but the limited PMC events */
                j = 0;
                for (i = 0; i < nalt; ++i) {
                        if (p6_limited_pmc_event(alt[i])) {
                                alt[j] = alt[i];
                                ++j;
                        }
                }
                nalt = j;
        }

        return nalt;
}
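
/*
 * For example, p6_get_alternatives(0x80080, 0, alt) (PM_LD_MISS_L1) copies
 * out the table row above and returns 4 alternatives: 0x80080, 0x10000d,
 * 0x30000c and 0x4000f0, none of which is restricted to the limited
 * PMC5/PMC6 counters.
 */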

static void p6_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
        /* Set PMCxSEL to 0 to disable PMCx */
        if (pmc <= 3)
                mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
}

static int power6_generic_events[] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x1e,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 2,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x280030,     /* LD_REF_L1 */
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x30000c,     /* LD_MISS_L1 */
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x410a0,      /* BR_PRED */
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x400052,     /* BR_MPRED */
};

#define C(x)    PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 * The "DTLB" and "ITLB" events relate to the DERAT and IERAT.
 */
static u64 power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        0x280030,       0x80080         },
                [C(OP_WRITE)] = {       0x180032,       0x80088         },
                [C(OP_PREFETCH)] = {    0x810a4,        0               },
        },
        [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        0,              0x100056        },
                [C(OP_WRITE)] = {       -1,             -1              },
                [C(OP_PREFETCH)] = {    0x4008c,        0               },
        },
        [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        0x150730,       0x250532        },
                [C(OP_WRITE)] = {       0x250432,       0x150432        },
                [C(OP_PREFETCH)] = {    0x810a6,        0               },
        },
        [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        0,              0x20000e        },
                [C(OP_WRITE)] = {       -1,             -1              },
                [C(OP_PREFETCH)] = {    -1,             -1              },
        },
        [C(ITLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        0,              0x420ce         },
                [C(OP_WRITE)] = {       -1,             -1              },
                [C(OP_PREFETCH)] = {    -1,             -1              },
        },
        [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        0x430e6,        0x400052        },
                [C(OP_WRITE)] = {       -1,             -1              },
                [C(OP_PREFETCH)] = {    -1,             -1              },
        },
        [C(NODE)] = {           /*      RESULT_ACCESS   RESULT_MISS */
                [C(OP_READ)] = {        -1,             -1              },
                [C(OP_WRITE)] = {       -1,             -1              },
                [C(OP_PREFETCH)] = {    -1,             -1              },
        },
};
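
/*
 * A note on add_fields and test_adder below (the constraint arithmetic
 * itself lives in core-book3s.c, so this is only a summary): add_fields =
 * 0x1555 adds 1 to each of the six 2-bit per-PMC use counters (bits 0-11)
 * and to the PMC1-4 count field at bits 12-15 when events' constraint
 * values are combined; test_adder = 0x3000 biases that count so a fifth
 * event using PMC1-4 carries into bit 15, which p6_get_constraint() puts
 * in the mask, causing the combination to be rejected.
 */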

static struct power_pmu power6_pmu = {
        .name                   = "POWER6",
        .n_counter              = 6,
        .max_alternatives       = MAX_ALT,
        .add_fields             = 0x1555,
        .test_adder             = 0x3000,
        .compute_mmcr           = p6_compute_mmcr,
        .get_constraint         = p6_get_constraint,
        .get_alternatives       = p6_get_alternatives,
        .disable_pmc            = p6_disable_pmc,
        .limited_pmc_event      = p6_limited_pmc_event,
        .flags                  = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
        .n_generic              = ARRAY_SIZE(power6_generic_events),
        .generic_events         = power6_generic_events,
        .cache_events           = &power6_cache_events,
};

int __init init_power6_pmu(void)
{
        unsigned int pvr = mfspr(SPRN_PVR);

        if (PVR_VER(pvr) != PVR_POWER6)
                return -ENODEV;

        return register_power_pmu(&power6_pmu);
}