mlxbf-pmc.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479
  1. // SPDX-License-Identifier: GPL-2.0-only OR Linux-OpenIB
  2. /*
  3. * Mellanox BlueField Performance Monitoring Counters driver
  4. *
  5. * This driver provides a sysfs interface for monitoring
  6. * performance statistics in BlueField SoC.
  7. *
  8. * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
  9. */
  10. #include <linux/acpi.h>
  11. #include <linux/arm-smccc.h>
  12. #include <linux/bitfield.h>
  13. #include <linux/errno.h>
  14. #include <linux/hwmon.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/string.h>
  17. #include <uapi/linux/psci.h>
/* SMC function IDs used to access performance registers through ATF. */
#define MLXBF_PMC_WRITE_REG_32 0x82000009
#define MLXBF_PMC_READ_REG_32 0x8200000A
#define MLXBF_PMC_WRITE_REG_64 0x8200000B
#define MLXBF_PMC_READ_REG_64 0x8200000C
/* SiP service probe/version SMCs and the minimum version this driver needs. */
#define MLXBF_PMC_SIP_SVC_UID 0x8200ff01
#define MLXBF_PMC_SIP_SVC_VERSION 0x8200ff03
#define MLXBF_PMC_SVC_REQ_MAJOR 0
#define MLXBF_PMC_SVC_MIN_MINOR 3
/* SMCCC status returned when the secure world rejects a register access. */
#define MLXBF_PMC_SMCCC_ACCESS_VIOLATION -4
/* Event-set selectors: BlueField-1 vs BlueField-2 event numbering. */
#define MLXBF_PMC_EVENT_SET_BF1 0
#define MLXBF_PMC_EVENT_SET_BF2 1
#define MLXBF_PMC_EVENT_INFO_LEN 100
/* Sizing limits for blocks and per-block sysfs attributes. */
#define MLXBF_PMC_MAX_BLOCKS 30
#define MLXBF_PMC_MAX_ATTRS 30
#define MLXBF_PMC_INFO_SZ 4
/* Register strides: generic blocks use 8-byte registers, L3C uses 4-byte. */
#define MLXBF_PMC_REG_SIZE 8
#define MLXBF_PMC_L3C_REG_SIZE 4
/* Block types: programmable counters vs plain event registers. */
#define MLXBF_PMC_TYPE_COUNTER 1
#define MLXBF_PMC_TYPE_REGISTER 0
/* Internal perfmon register indices written via the PERFMON_CONFIG word. */
#define MLXBF_PMC_PERFCTL 0
#define MLXBF_PMC_PERFEVT 1
#define MLXBF_PMC_PERFACC0 4
/* PERFMON_CONFIG access word fields (write/read select, strobe, addr, data). */
#define MLXBF_PMC_PERFMON_CONFIG_WR_R_B BIT(0)
#define MLXBF_PMC_PERFMON_CONFIG_STROBE BIT(1)
#define MLXBF_PMC_PERFMON_CONFIG_ADDR GENMASK_ULL(4, 2)
#define MLXBF_PMC_PERFMON_CONFIG_WDATA GENMASK_ULL(60, 5)
/* PERFCTL fields */
#define MLXBF_PMC_PERFCTL_FM0 GENMASK_ULL(18, 16)
#define MLXBF_PMC_PERFCTL_MS0 GENMASK_ULL(21, 20)
#define MLXBF_PMC_PERFCTL_ACCM0 GENMASK_ULL(26, 24)
#define MLXBF_PMC_PERFCTL_AD0 BIT(27)
#define MLXBF_PMC_PERFCTL_ETRIG0 GENMASK_ULL(29, 28)
#define MLXBF_PMC_PERFCTL_EB0 BIT(30)
#define MLXBF_PMC_PERFCTL_EN0 BIT(31)
/* PERFEVT event-select field */
#define MLXBF_PMC_PERFEVT_EVTSEL GENMASK_ULL(31, 24)
/* L3 cache counter registers and their fields. */
#define MLXBF_PMC_L3C_PERF_CNT_CFG 0x0
#define MLXBF_PMC_L3C_PERF_CNT_SEL 0x10
#define MLXBF_PMC_L3C_PERF_CNT_SEL_1 0x14
#define MLXBF_PMC_L3C_PERF_CNT_LOW 0x40
#define MLXBF_PMC_L3C_PERF_CNT_HIGH 0x60
#define MLXBF_PMC_L3C_PERF_CNT_CFG_EN BIT(0)
#define MLXBF_PMC_L3C_PERF_CNT_CFG_RST BIT(1)
/* Event selects for counters 0-3 (SEL) and counter 4 (SEL_1). */
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0 GENMASK(5, 0)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1 GENMASK(13, 8)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2 GENMASK(21, 16)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3 GENMASK(29, 24)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4 GENMASK(5, 0)
/* 53-bit counter value split across LOW (32 bits) and HIGH (25 bits). */
#define MLXBF_PMC_L3C_PERF_CNT_LOW_VAL GENMASK(31, 0)
#define MLXBF_PMC_L3C_PERF_CNT_HIGH_VAL GENMASK(24, 0)
/**
 * struct mlxbf_pmc_attribute - Structure to hold attribute and block info
 * for each sysfs entry
 * @dev_attr: Device attribute struct
 * @index: index to identify counter number within a block
 * @nr: block number to which the sysfs belongs
 *
 * Embedding dev_attr first lets show/store callbacks recover this struct
 * (and thus the counter index and block number) via container_of().
 */
struct mlxbf_pmc_attribute {
	struct device_attribute dev_attr;
	int index;
	int nr;
};
/**
 * struct mlxbf_pmc_block_info - Structure to hold info for each HW block
 *
 * @mmio_base: The VA at which the PMC block is mapped
 * @blk_size: Size of each mapped region
 * @counters: Number of counters in the block
 * @type: Type of counters in the block (MLXBF_PMC_TYPE_COUNTER or
 *        MLXBF_PMC_TYPE_REGISTER)
 * @attr_counter: Attributes for "counter" sysfs files
 * @attr_event: Attributes for "event" sysfs files
 * @attr_event_list: Attributes for "event_list" sysfs files
 * @attr_enable: Attributes for "enable" sysfs files
 * @block_attr: All attributes needed for the block
 * @block_attr_grp: Attribute group for the block
 */
struct mlxbf_pmc_block_info {
	void __iomem *mmio_base;
	size_t blk_size;
	size_t counters;
	int type;
	struct mlxbf_pmc_attribute *attr_counter;
	struct mlxbf_pmc_attribute *attr_event;
	struct mlxbf_pmc_attribute attr_event_list;
	struct mlxbf_pmc_attribute attr_enable;
	struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS];
	struct attribute_group block_attr_grp;
};
/**
 * struct mlxbf_pmc_context - Structure to hold PMC context info
 *
 * @pdev: The kernel structure representing the device
 * @total_blocks: Total number of blocks
 * @tile_count: Number of tiles in the system
 * @hwmon_dev: Hwmon device for bfperf
 * @block_name: Block name
 * @block: Block info
 * @groups: Attribute groups from each block
 * @svc_sreg_support: Whether SMCs are used to access performance registers
 * @sreg_tbl_perf: Secure register access table number
 * @event_set: Event set to use (MLXBF_PMC_EVENT_SET_BF1 or _BF2)
 */
struct mlxbf_pmc_context {
	struct platform_device *pdev;
	uint32_t total_blocks;
	uint32_t tile_count;
	struct device *hwmon_dev;
	const char *block_name[MLXBF_PMC_MAX_BLOCKS];
	struct mlxbf_pmc_block_info block[MLXBF_PMC_MAX_BLOCKS];
	const struct attribute_group *groups[MLXBF_PMC_MAX_BLOCKS];
	bool svc_sreg_support;
	uint32_t sreg_tbl_perf;
	unsigned int event_set;
};
/**
 * struct mlxbf_pmc_events - Structure to hold supported events for each block
 * @evt_num: Event number used to program counters
 * @evt_name: Name of the event
 */
struct mlxbf_pmc_events {
	int evt_num;
	char *evt_name;
};
/* Supported events for the "pcie" block. */
static const struct mlxbf_pmc_events mlxbf_pmc_pcie_events[] = {
	{ 0x0, "IN_P_PKT_CNT" },
	{ 0x10, "IN_NP_PKT_CNT" },
	{ 0x18, "IN_C_PKT_CNT" },
	{ 0x20, "OUT_P_PKT_CNT" },
	{ 0x28, "OUT_NP_PKT_CNT" },
	{ 0x30, "OUT_C_PKT_CNT" },
	{ 0x38, "IN_P_BYTE_CNT" },
	{ 0x40, "IN_NP_BYTE_CNT" },
	{ 0x48, "IN_C_BYTE_CNT" },
	{ 0x50, "OUT_P_BYTE_CNT" },
	{ 0x58, "OUT_NP_BYTE_CNT" },
	{ 0x60, "OUT_C_BYTE_CNT" },
};
/* Shared event list used by the "triogen", "gic" and "smmu" blocks. */
static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
	{ 0x0, "AW_REQ" },
	{ 0x1, "AW_BEATS" },
	{ 0x2, "AW_TRANS" },
	{ 0x3, "AW_RESP" },
	{ 0x4, "AW_STL" },
	{ 0x5, "AW_LAT" },
	{ 0x6, "AW_REQ_TBU" },
	{ 0x8, "AR_REQ" },
	{ 0x9, "AR_BEATS" },
	{ 0xa, "AR_TRANS" },
	{ 0xb, "AR_STL" },
	{ 0xc, "AR_LAT" },
	{ 0xd, "AR_REQ_TBU" },
	{ 0xe, "TBU_MISS" },
	{ 0xf, "TX_DAT_AF" },
	{ 0x10, "RX_DAT_AF" },
	{ 0x11, "RETRYQ_CRED" },
};
/* "trio" block events for event set BF1 (BlueField-1). */
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
	{ 0x0, "DISABLE" },
	{ 0xa0, "TPIO_DATA_BEAT" },
	{ 0xa1, "TDMA_DATA_BEAT" },
	{ 0xa2, "MAP_DATA_BEAT" },
	{ 0xa3, "TXMSG_DATA_BEAT" },
	{ 0xa4, "TPIO_DATA_PACKET" },
	{ 0xa5, "TDMA_DATA_PACKET" },
	{ 0xa6, "MAP_DATA_PACKET" },
	{ 0xa7, "TXMSG_DATA_PACKET" },
	{ 0xa8, "TDMA_RT_AF" },
	{ 0xa9, "TDMA_PBUF_MAC_AF" },
	{ 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
	{ 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
	{ 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
	{ 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
	{ 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
	{ 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
	{ 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
	{ 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
	{ 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
	{ 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
};
/* "trio" block events for event set BF2; superset of BF1 plus ring flits. */
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
	{ 0x0, "DISABLE" },
	{ 0xa0, "TPIO_DATA_BEAT" },
	{ 0xa1, "TDMA_DATA_BEAT" },
	{ 0xa2, "MAP_DATA_BEAT" },
	{ 0xa3, "TXMSG_DATA_BEAT" },
	{ 0xa4, "TPIO_DATA_PACKET" },
	{ 0xa5, "TDMA_DATA_PACKET" },
	{ 0xa6, "MAP_DATA_PACKET" },
	{ 0xa7, "TXMSG_DATA_PACKET" },
	{ 0xa8, "TDMA_RT_AF" },
	{ 0xa9, "TDMA_PBUF_MAC_AF" },
	{ 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
	{ 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
	{ 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
	{ 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
	{ 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
	{ 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
	{ 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
	{ 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
	{ 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
	{ 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
	{ 0xb4, "TRIO_RING_TX_FLIT_CH0" },
	{ 0xb5, "TRIO_RING_TX_FLIT_CH1" },
	{ 0xb6, "TRIO_RING_TX_FLIT_CH2" },
	{ 0xb7, "TRIO_RING_TX_FLIT_CH3" },
	{ 0xb8, "TRIO_RING_TX_FLIT_CH4" },
	{ 0xb9, "TRIO_RING_RX_FLIT_CH0" },
	{ 0xba, "TRIO_RING_RX_FLIT_CH1" },
	{ 0xbb, "TRIO_RING_RX_FLIT_CH2" },
	{ 0xbc, "TRIO_RING_RX_FLIT_CH3" },
};
/* Supported events for the "ecc" block. */
static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
	{ 0x0, "DISABLE" },
	{ 0x100, "ECC_SINGLE_ERROR_CNT" },
	{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
	{ 0x114, "SERR_INJ" },
	{ 0x118, "DERR_INJ" },
	{ 0x124, "ECC_SINGLE_ERROR_0" },
	{ 0x164, "ECC_DOUBLE_ERROR_0" },
	{ 0x340, "DRAM_ECC_COUNT" },
	{ 0x344, "DRAM_ECC_INJECT" },
	{ 0x348, "DRAM_ECC_ERROR" },
};
/* Supported events for the "mss" block. */
static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
	{ 0x0, "DISABLE" },
	{ 0xc0, "RXREQ_MSS" },
	{ 0xc1, "RXDAT_MSS" },
	{ 0xc2, "TXRSP_MSS" },
	{ 0xc3, "TXDAT_MSS" },
};
/* Supported events for the "tile" blocks (HNF home-node counters). */
static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
	{ 0x0, "DISABLE" },
	{ 0x45, "HNF_REQUESTS" },
	{ 0x46, "HNF_REJECTS" },
	{ 0x47, "ALL_BUSY" },
	{ 0x48, "MAF_BUSY" },
	{ 0x49, "MAF_REQUESTS" },
	{ 0x4a, "RNF_REQUESTS" },
	{ 0x4b, "REQUEST_TYPE" },
	{ 0x4c, "MEMORY_READS" },
	{ 0x4d, "MEMORY_WRITES" },
	{ 0x4e, "VICTIM_WRITE" },
	{ 0x4f, "POC_FULL" },
	{ 0x50, "POC_FAIL" },
	{ 0x51, "POC_SUCCESS" },
	{ 0x52, "POC_WRITES" },
	{ 0x53, "POC_READS" },
	{ 0x54, "FORWARD" },
	{ 0x55, "RXREQ_HNF" },
	{ 0x56, "RXRSP_HNF" },
	{ 0x57, "RXDAT_HNF" },
	{ 0x58, "TXREQ_HNF" },
	{ 0x59, "TXRSP_HNF" },
	{ 0x5a, "TXDAT_HNF" },
	{ 0x5b, "TXSNP_HNF" },
	{ 0x5c, "INDEX_MATCH" },
	{ 0x5d, "A72_ACCESS" },
	{ 0x5e, "IO_ACCESS" },
	{ 0x5f, "TSO_WRITE" },
	{ 0x60, "TSO_CONFLICT" },
	{ 0x61, "DIR_HIT" },
	{ 0x62, "HNF_ACCEPTS" },
	{ 0x63, "REQ_BUF_EMPTY" },
	{ 0x64, "REQ_BUF_IDLE_MAF" },
	{ 0x65, "TSO_NOARB" },
	{ 0x66, "TSO_NOARB_CYCLES" },
	{ 0x67, "MSS_NO_CREDIT" },
	{ 0x68, "TXDAT_NO_LCRD" },
	{ 0x69, "TXSNP_NO_LCRD" },
	{ 0x6a, "TXRSP_NO_LCRD" },
	{ 0x6b, "TXREQ_NO_LCRD" },
	{ 0x6c, "TSO_CL_MATCH" },
	{ 0x6d, "MEMORY_READS_BYPASS" },
	{ 0x6e, "TSO_NOARB_TIMEOUT" },
	{ 0x6f, "ALLOCATE" },
	{ 0x70, "VICTIM" },
	{ 0x71, "A72_WRITE" },
	{ 0x72, "A72_READ" },
	{ 0x73, "IO_WRITE" },
	{ 0x74, "IO_READ" },
	{ 0x75, "TSO_REJECT" },
	{ 0x80, "TXREQ_RN" },
	{ 0x81, "TXRSP_RN" },
	{ 0x82, "TXDAT_RN" },
	{ 0x83, "RXSNP_RN" },
	{ 0x84, "RXRSP_RN" },
	{ 0x85, "RXDAT_RN" },
};
/* Supported events for the "tilenet" blocks (CDN/DDN/NDN mesh counters). */
static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
	{ 0x0, "DISABLE" },
	{ 0x12, "CDN_REQ" },
	{ 0x13, "DDN_REQ" },
	{ 0x14, "NDN_REQ" },
	{ 0x15, "CDN_DIAG_N_OUT_OF_CRED" },
	{ 0x16, "CDN_DIAG_S_OUT_OF_CRED" },
	{ 0x17, "CDN_DIAG_E_OUT_OF_CRED" },
	{ 0x18, "CDN_DIAG_W_OUT_OF_CRED" },
	{ 0x19, "CDN_DIAG_C_OUT_OF_CRED" },
	{ 0x1a, "CDN_DIAG_N_EGRESS" },
	{ 0x1b, "CDN_DIAG_S_EGRESS" },
	{ 0x1c, "CDN_DIAG_E_EGRESS" },
	{ 0x1d, "CDN_DIAG_W_EGRESS" },
	{ 0x1e, "CDN_DIAG_C_EGRESS" },
	{ 0x1f, "CDN_DIAG_N_INGRESS" },
	{ 0x20, "CDN_DIAG_S_INGRESS" },
	{ 0x21, "CDN_DIAG_E_INGRESS" },
	{ 0x22, "CDN_DIAG_W_INGRESS" },
	{ 0x23, "CDN_DIAG_C_INGRESS" },
	{ 0x24, "CDN_DIAG_CORE_SENT" },
	{ 0x25, "DDN_DIAG_N_OUT_OF_CRED" },
	{ 0x26, "DDN_DIAG_S_OUT_OF_CRED" },
	{ 0x27, "DDN_DIAG_E_OUT_OF_CRED" },
	{ 0x28, "DDN_DIAG_W_OUT_OF_CRED" },
	{ 0x29, "DDN_DIAG_C_OUT_OF_CRED" },
	{ 0x2a, "DDN_DIAG_N_EGRESS" },
	{ 0x2b, "DDN_DIAG_S_EGRESS" },
	{ 0x2c, "DDN_DIAG_E_EGRESS" },
	{ 0x2d, "DDN_DIAG_W_EGRESS" },
	{ 0x2e, "DDN_DIAG_C_EGRESS" },
	{ 0x2f, "DDN_DIAG_N_INGRESS" },
	{ 0x30, "DDN_DIAG_S_INGRESS" },
	{ 0x31, "DDN_DIAG_E_INGRESS" },
	{ 0x32, "DDN_DIAG_W_INGRESS" },
	{ 0x33, "DDN_DIAG_C_INGRESS" },
	{ 0x34, "DDN_DIAG_CORE_SENT" },
	{ 0x35, "NDN_DIAG_N_OUT_OF_CRED" },
	{ 0x36, "NDN_DIAG_S_OUT_OF_CRED" },
	{ 0x37, "NDN_DIAG_E_OUT_OF_CRED" },
	{ 0x38, "NDN_DIAG_W_OUT_OF_CRED" },
	{ 0x39, "NDN_DIAG_C_OUT_OF_CRED" },
	{ 0x3a, "NDN_DIAG_N_EGRESS" },
	{ 0x3b, "NDN_DIAG_S_EGRESS" },
	{ 0x3c, "NDN_DIAG_E_EGRESS" },
	{ 0x3d, "NDN_DIAG_W_EGRESS" },
	{ 0x3e, "NDN_DIAG_C_EGRESS" },
	{ 0x3f, "NDN_DIAG_N_INGRESS" },
	{ 0x40, "NDN_DIAG_S_INGRESS" },
	{ 0x41, "NDN_DIAG_E_INGRESS" },
	{ 0x42, "NDN_DIAG_W_INGRESS" },
	{ 0x43, "NDN_DIAG_C_INGRESS" },
	{ 0x44, "NDN_DIAG_CORE_SENT" },
};
/* Supported events for the "l3cache" blocks. */
static const struct mlxbf_pmc_events mlxbf_pmc_l3c_events[] = {
	{ 0x00, "DISABLE" },
	{ 0x01, "CYCLES" },
	{ 0x02, "TOTAL_RD_REQ_IN" },
	{ 0x03, "TOTAL_WR_REQ_IN" },
	{ 0x04, "TOTAL_WR_DBID_ACK" },
	{ 0x05, "TOTAL_WR_DATA_IN" },
	{ 0x06, "TOTAL_WR_COMP" },
	{ 0x07, "TOTAL_RD_DATA_OUT" },
	{ 0x08, "TOTAL_CDN_REQ_IN_BANK0" },
	{ 0x09, "TOTAL_CDN_REQ_IN_BANK1" },
	{ 0x0a, "TOTAL_DDN_REQ_IN_BANK0" },
	{ 0x0b, "TOTAL_DDN_REQ_IN_BANK1" },
	{ 0x0c, "TOTAL_EMEM_RD_RES_IN_BANK0" },
	{ 0x0d, "TOTAL_EMEM_RD_RES_IN_BANK1" },
	{ 0x0e, "TOTAL_CACHE_RD_RES_IN_BANK0" },
	{ 0x0f, "TOTAL_CACHE_RD_RES_IN_BANK1" },
	{ 0x10, "TOTAL_EMEM_RD_REQ_BANK0" },
	{ 0x11, "TOTAL_EMEM_RD_REQ_BANK1" },
	{ 0x12, "TOTAL_EMEM_WR_REQ_BANK0" },
	{ 0x13, "TOTAL_EMEM_WR_REQ_BANK1" },
	{ 0x14, "TOTAL_RD_REQ_OUT" },
	{ 0x15, "TOTAL_WR_REQ_OUT" },
	{ 0x16, "TOTAL_RD_RES_IN" },
	{ 0x17, "HITS_BANK0" },
	{ 0x18, "HITS_BANK1" },
	{ 0x19, "MISSES_BANK0" },
	{ 0x1a, "MISSES_BANK1" },
	{ 0x1b, "ALLOCATIONS_BANK0" },
	{ 0x1c, "ALLOCATIONS_BANK1" },
	{ 0x1d, "EVICTIONS_BANK0" },
	{ 0x1e, "EVICTIONS_BANK1" },
	{ 0x1f, "DBID_REJECT" },
	{ 0x20, "WRDB_REJECT_BANK0" },
	{ 0x21, "WRDB_REJECT_BANK1" },
	{ 0x22, "CMDQ_REJECT_BANK0" },
	{ 0x23, "CMDQ_REJECT_BANK1" },
	{ 0x24, "COB_REJECT_BANK0" },
	{ 0x25, "COB_REJECT_BANK1" },
	{ 0x26, "TRB_REJECT_BANK0" },
	{ 0x27, "TRB_REJECT_BANK1" },
	{ 0x28, "TAG_REJECT_BANK0" },
	{ 0x29, "TAG_REJECT_BANK1" },
	{ 0x2a, "ANY_REJECT_BANK0" },
	{ 0x2b, "ANY_REJECT_BANK1" },
};
/* Driver-wide PMC context; set up once at probe time. */
static struct mlxbf_pmc_context *pmc;

/* UUID used to probe ATF service. */
static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4";
  407. /* Calls an SMC to access a performance register */
  408. static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
  409. uint64_t *result)
  410. {
  411. struct arm_smccc_res res;
  412. int status, err = 0;
  413. arm_smccc_smc(command, pmc->sreg_tbl_perf, (uintptr_t)addr, 0, 0, 0, 0,
  414. 0, &res);
  415. status = res.a0;
  416. switch (status) {
  417. case PSCI_RET_NOT_SUPPORTED:
  418. err = -EINVAL;
  419. break;
  420. case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:
  421. err = -EACCES;
  422. break;
  423. default:
  424. *result = res.a1;
  425. break;
  426. }
  427. return err;
  428. }
  429. /* Read from a performance counter */
  430. static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
  431. uint64_t *result)
  432. {
  433. if (pmc->svc_sreg_support)
  434. return mlxbf_pmc_secure_read(addr, command, result);
  435. if (command == MLXBF_PMC_READ_REG_32)
  436. *result = readl(addr);
  437. else
  438. *result = readq(addr);
  439. return 0;
  440. }
  441. /* Convenience function for 32-bit reads */
  442. static int mlxbf_pmc_readl(void __iomem *addr, uint32_t *result)
  443. {
  444. uint64_t read_out;
  445. int status;
  446. status = mlxbf_pmc_read(addr, MLXBF_PMC_READ_REG_32, &read_out);
  447. if (status)
  448. return status;
  449. *result = (uint32_t)read_out;
  450. return 0;
  451. }
  452. /* Calls an SMC to access a performance register */
  453. static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
  454. uint64_t value)
  455. {
  456. struct arm_smccc_res res;
  457. int status, err = 0;
  458. arm_smccc_smc(command, pmc->sreg_tbl_perf, value, (uintptr_t)addr, 0, 0,
  459. 0, 0, &res);
  460. status = res.a0;
  461. switch (status) {
  462. case PSCI_RET_NOT_SUPPORTED:
  463. err = -EINVAL;
  464. break;
  465. case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:
  466. err = -EACCES;
  467. break;
  468. }
  469. return err;
  470. }
  471. /* Write to a performance counter */
  472. static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
  473. {
  474. if (pmc->svc_sreg_support)
  475. return mlxbf_pmc_secure_write(addr, command, value);
  476. if (command == MLXBF_PMC_WRITE_REG_32)
  477. writel(value, addr);
  478. else
  479. writeq(value, addr);
  480. return 0;
  481. }
  482. /* Check if the register offset is within the mapped region for the block */
  483. static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
  484. {
  485. if ((offset >= 0) && !(offset % MLXBF_PMC_REG_SIZE) &&
  486. (offset + MLXBF_PMC_REG_SIZE <= pmc->block[blk_num].blk_size))
  487. return true; /* inside the mapped PMC space */
  488. return false;
  489. }
  490. /* Get the event list corresponding to a certain block */
  491. static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
  492. int *size)
  493. {
  494. const struct mlxbf_pmc_events *events;
  495. if (strstr(blk, "tilenet")) {
  496. events = mlxbf_pmc_hnfnet_events;
  497. *size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
  498. } else if (strstr(blk, "tile")) {
  499. events = mlxbf_pmc_hnf_events;
  500. *size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
  501. } else if (strstr(blk, "triogen")) {
  502. events = mlxbf_pmc_smgen_events;
  503. *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
  504. } else if (strstr(blk, "trio")) {
  505. switch (pmc->event_set) {
  506. case MLXBF_PMC_EVENT_SET_BF1:
  507. events = mlxbf_pmc_trio_events_1;
  508. *size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
  509. break;
  510. case MLXBF_PMC_EVENT_SET_BF2:
  511. events = mlxbf_pmc_trio_events_2;
  512. *size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
  513. break;
  514. default:
  515. events = NULL;
  516. *size = 0;
  517. break;
  518. }
  519. } else if (strstr(blk, "mss")) {
  520. events = mlxbf_pmc_mss_events;
  521. *size = ARRAY_SIZE(mlxbf_pmc_mss_events);
  522. } else if (strstr(blk, "ecc")) {
  523. events = mlxbf_pmc_ecc_events;
  524. *size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
  525. } else if (strstr(blk, "pcie")) {
  526. events = mlxbf_pmc_pcie_events;
  527. *size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
  528. } else if (strstr(blk, "l3cache")) {
  529. events = mlxbf_pmc_l3c_events;
  530. *size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
  531. } else if (strstr(blk, "gic")) {
  532. events = mlxbf_pmc_smgen_events;
  533. *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
  534. } else if (strstr(blk, "smmu")) {
  535. events = mlxbf_pmc_smgen_events;
  536. *size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
  537. } else {
  538. events = NULL;
  539. *size = 0;
  540. }
  541. return events;
  542. }
  543. /* Get the event number given the name */
  544. static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
  545. {
  546. const struct mlxbf_pmc_events *events;
  547. int i, size;
  548. events = mlxbf_pmc_event_list(blk, &size);
  549. if (!events)
  550. return -EINVAL;
  551. for (i = 0; i < size; ++i) {
  552. if (!strcmp(evt, events[i].evt_name))
  553. return events[i].evt_num;
  554. }
  555. return -ENODEV;
  556. }
/*
 * Get the event name given the number.
 * (The inverse of mlxbf_pmc_get_event_num; returns NULL for an unknown
 * block or an event number not in the block's list.)
 */
static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
{
	const struct mlxbf_pmc_events *events;
	int i, size;

	events = mlxbf_pmc_event_list(blk, &size);
	if (!events)
		return NULL;

	for (i = 0; i < size; ++i) {
		if (evt == events[i].evt_num)
			return events[i].evt_name;
	}

	return NULL;
}
  571. /* Method to enable/disable/reset l3cache counters */
  572. static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
  573. {
  574. uint32_t perfcnt_cfg = 0;
  575. if (enable)
  576. perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_EN;
  577. if (reset)
  578. perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_RST;
  579. return mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
  580. MLXBF_PMC_L3C_PERF_CNT_CFG,
  581. MLXBF_PMC_WRITE_REG_32, perfcnt_cfg);
  582. }
/*
 * Method to handle l3cache counter programming.
 * Counters 0-3 share the PERF_CNT_SEL register (one 6-bit event-select
 * field each); counter 4 has its own field in PERF_CNT_SEL_1.  The
 * register is read-modify-written so the other counters' selections
 * are preserved.
 */
static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
					uint32_t evt)
{
	uint32_t perfcnt_sel_1 = 0;
	uint32_t perfcnt_sel = 0;
	uint32_t *wordaddr;
	void __iomem *pmcaddr;
	int ret;

	/* Disable all counters before programming them */
	if (mlxbf_pmc_config_l3_counters(blk_num, false, false))
		return -EINVAL;

	/* Select appropriate register information */
	switch (cnt_num) {
	case 0 ... 3:
		pmcaddr = pmc->block[blk_num].mmio_base +
			  MLXBF_PMC_L3C_PERF_CNT_SEL;
		wordaddr = &perfcnt_sel;
		break;
	case 4:
		pmcaddr = pmc->block[blk_num].mmio_base +
			  MLXBF_PMC_L3C_PERF_CNT_SEL_1;
		wordaddr = &perfcnt_sel_1;
		break;
	default:
		return -EINVAL;
	}

	/* Read current value so the other counters' fields are kept. */
	ret = mlxbf_pmc_readl(pmcaddr, wordaddr);
	if (ret)
		return ret;

	/* Clear then set the event-select field for this counter. */
	switch (cnt_num) {
	case 0:
		perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0;
		perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0,
					  evt);
		break;
	case 1:
		perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1;
		perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1,
					  evt);
		break;
	case 2:
		perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2;
		perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2,
					  evt);
		break;
	case 3:
		perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3;
		perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3,
					  evt);
		break;
	case 4:
		perfcnt_sel_1 &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4;
		perfcnt_sel_1 |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,
					    evt);
		break;
	default:
		return -EINVAL;
	}

	return mlxbf_pmc_write(pmcaddr, MLXBF_PMC_WRITE_REG_32, *wordaddr);
}
/*
 * Method to program a counter to monitor an event.
 * Non-L3 blocks are programmed through the counter's PERFMON_CONFIG
 * word with three strobed writes, in order: PERFCTL (enable + trigger
 * config), PERFEVT (event select), then PERFACC0 (clear accumulator).
 * The write sequence is order-dependent.
 */
static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
				     uint32_t evt, bool is_l3)
{
	uint64_t perfctl, perfevt, perfmon_cfg;

	if (cnt_num >= pmc->block[blk_num].counters)
		return -ENODEV;

	/* L3 cache counters use a different register layout. */
	if (is_l3)
		return mlxbf_pmc_program_l3_counter(blk_num, cnt_num, evt);

	/* Configure the counter */
	perfctl = FIELD_PREP(MLXBF_PMC_PERFCTL_EN0, 1);
	perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_EB0, 0);
	perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ETRIG0, 1);
	perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_AD0, 0);
	perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ACCM0, 0);
	perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_MS0, 0);
	perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_FM0, 0);

	/* WR_R_B=1 selects write; STROBE=1 commits; ADDR picks PERFCTL. */
	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfctl);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
				  MLXBF_PMC_PERFCTL);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
	if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
			    cnt_num * MLXBF_PMC_REG_SIZE,
			    MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
		return -EFAULT;

	/* Select the event */
	perfevt = FIELD_PREP(MLXBF_PMC_PERFEVT_EVTSEL, evt);
	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfevt);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
				  MLXBF_PMC_PERFEVT);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
	if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
			    cnt_num * MLXBF_PMC_REG_SIZE,
			    MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
		return -EFAULT;

	/* Clear the accumulator (WDATA left at 0). */
	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
				 MLXBF_PMC_PERFACC0);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
	if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
			    cnt_num * MLXBF_PMC_REG_SIZE,
			    MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
		return -EFAULT;

	return 0;
}
  692. /* Method to handle l3 counter reads */
  693. static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
  694. uint64_t *result)
  695. {
  696. uint32_t perfcnt_low = 0, perfcnt_high = 0;
  697. uint64_t value;
  698. int status = 0;
  699. status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
  700. MLXBF_PMC_L3C_PERF_CNT_LOW +
  701. cnt_num * MLXBF_PMC_L3C_REG_SIZE,
  702. &perfcnt_low);
  703. if (status)
  704. return status;
  705. status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
  706. MLXBF_PMC_L3C_PERF_CNT_HIGH +
  707. cnt_num * MLXBF_PMC_L3C_REG_SIZE,
  708. &perfcnt_high);
  709. if (status)
  710. return status;
  711. value = perfcnt_high;
  712. value = value << 32;
  713. value |= perfcnt_low;
  714. *result = value;
  715. return 0;
  716. }
/*
 * Method to read the counter value.
 * For non-L3 blocks the counter must first be put in "read" mode by
 * strobing a PERFMON_CONFIG word with WR_R_B=0, after which the value
 * is available at the block's value-register offset (one counter-array
 * stride past the config registers).
 */
static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
				  uint64_t *result)
{
	uint32_t perfcfg_offset, perfval_offset;
	uint64_t perfmon_cfg;
	int status;

	if (cnt_num >= pmc->block[blk_num].counters)
		return -EINVAL;

	if (is_l3)
		return mlxbf_pmc_read_l3_counter(blk_num, cnt_num, result);

	perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
	/* Value registers sit after the per-counter config registers. */
	perfval_offset = perfcfg_offset +
			 pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;

	/* Set counter in "read" mode */
	perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
				 MLXBF_PMC_PERFACC0);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
	perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
	status = mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
				 MLXBF_PMC_WRITE_REG_64, perfmon_cfg);
	if (status)
		return status;

	/* Get the counter value */
	return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
			      MLXBF_PMC_READ_REG_64, result);
}
  744. /* Method to read L3 block event */
  745. static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
  746. uint64_t *result)
  747. {
  748. uint32_t perfcnt_sel = 0, perfcnt_sel_1 = 0;
  749. uint32_t *wordaddr;
  750. void __iomem *pmcaddr;
  751. uint64_t evt;
  752. /* Select appropriate register information */
  753. switch (cnt_num) {
  754. case 0 ... 3:
  755. pmcaddr = pmc->block[blk_num].mmio_base +
  756. MLXBF_PMC_L3C_PERF_CNT_SEL;
  757. wordaddr = &perfcnt_sel;
  758. break;
  759. case 4:
  760. pmcaddr = pmc->block[blk_num].mmio_base +
  761. MLXBF_PMC_L3C_PERF_CNT_SEL_1;
  762. wordaddr = &perfcnt_sel_1;
  763. break;
  764. default:
  765. return -EINVAL;
  766. }
  767. if (mlxbf_pmc_readl(pmcaddr, wordaddr))
  768. return -EINVAL;
  769. /* Read from appropriate register field for the counter */
  770. switch (cnt_num) {
  771. case 0:
  772. evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0, perfcnt_sel);
  773. break;
  774. case 1:
  775. evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1, perfcnt_sel);
  776. break;
  777. case 2:
  778. evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2, perfcnt_sel);
  779. break;
  780. case 3:
  781. evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3, perfcnt_sel);
  782. break;
  783. case 4:
  784. evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,
  785. perfcnt_sel_1);
  786. break;
  787. default:
  788. return -EINVAL;
  789. }
  790. *result = evt;
  791. return 0;
  792. }
  793. /* Method to find the event currently being monitored by a counter */
  794. static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
  795. uint64_t *result)
  796. {
  797. uint32_t perfcfg_offset, perfval_offset;
  798. uint64_t perfmon_cfg, perfevt;
  799. if (cnt_num >= pmc->block[blk_num].counters)
  800. return -EINVAL;
  801. if (is_l3)
  802. return mlxbf_pmc_read_l3_event(blk_num, cnt_num, result);
  803. perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
  804. perfval_offset = perfcfg_offset +
  805. pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
  806. /* Set counter in "read" mode */
  807. perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
  808. MLXBF_PMC_PERFEVT);
  809. perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
  810. perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
  811. if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
  812. MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
  813. return -EFAULT;
  814. /* Get the event number */
  815. if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
  816. MLXBF_PMC_READ_REG_64, &perfevt))
  817. return -EFAULT;
  818. *result = FIELD_GET(MLXBF_PMC_PERFEVT_EVTSEL, perfevt);
  819. return 0;
  820. }
  821. /* Method to read a register */
  822. static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
  823. {
  824. uint32_t ecc_out;
  825. if (strstr(pmc->block_name[blk_num], "ecc")) {
  826. if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
  827. &ecc_out))
  828. return -EFAULT;
  829. *result = ecc_out;
  830. return 0;
  831. }
  832. if (mlxbf_pmc_valid_range(blk_num, offset))
  833. return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + offset,
  834. MLXBF_PMC_READ_REG_64, result);
  835. return -EINVAL;
  836. }
  837. /* Method to write to a register */
  838. static int mlxbf_pmc_write_reg(int blk_num, uint32_t offset, uint64_t data)
  839. {
  840. if (strstr(pmc->block_name[blk_num], "ecc")) {
  841. return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
  842. MLXBF_PMC_WRITE_REG_32, data);
  843. }
  844. if (mlxbf_pmc_valid_range(blk_num, offset))
  845. return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
  846. MLXBF_PMC_WRITE_REG_64, data);
  847. return -EINVAL;
  848. }
  849. /* Show function for "counter" sysfs files */
  850. static ssize_t mlxbf_pmc_counter_show(struct device *dev,
  851. struct device_attribute *attr, char *buf)
  852. {
  853. struct mlxbf_pmc_attribute *attr_counter = container_of(
  854. attr, struct mlxbf_pmc_attribute, dev_attr);
  855. int blk_num, cnt_num, offset;
  856. bool is_l3 = false;
  857. uint64_t value;
  858. blk_num = attr_counter->nr;
  859. cnt_num = attr_counter->index;
  860. if (strstr(pmc->block_name[blk_num], "l3cache"))
  861. is_l3 = true;
  862. if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
  863. if (mlxbf_pmc_read_counter(blk_num, cnt_num, is_l3, &value))
  864. return -EINVAL;
  865. } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
  866. offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
  867. attr->attr.name);
  868. if (offset < 0)
  869. return -EINVAL;
  870. if (mlxbf_pmc_read_reg(blk_num, offset, &value))
  871. return -EINVAL;
  872. } else
  873. return -EINVAL;
  874. return sysfs_emit(buf, "0x%llx\n", value);
  875. }
/*
 * sysfs store handler for "counterX" files.
 *
 * For counter-type blocks only a write of 0 is accepted; it clears the
 * counter by re-programming it with the event it already monitors.  For
 * register-type blocks (only "ecc" blocks accept non-zero data) the value
 * is written to the register named by the attribute.  Writes to l3cache
 * blocks are rejected outright.
 */
static ssize_t mlxbf_pmc_counter_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct mlxbf_pmc_attribute *attr_counter = container_of(
		attr, struct mlxbf_pmc_attribute, dev_attr);
	int blk_num, cnt_num, offset, err, data;
	bool is_l3 = false;
	uint64_t evt_num;

	blk_num = attr_counter->nr;
	cnt_num = attr_counter->index;
	err = kstrtoint(buf, 0, &data);
	if (err < 0)
		return err;

	/* Allow non-zero writes only to the ecc regs */
	if (!(strstr(pmc->block_name[blk_num], "ecc")) && data)
		return -EINVAL;

	/* Do not allow writes to the L3C regs */
	if (strstr(pmc->block_name[blk_num], "l3cache"))
		return -EINVAL;

	if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
		/*
		 * Clear the counter: read back the programmed event, then
		 * re-program the same event, which resets the accumulator.
		 * is_l3 is always false here because l3cache blocks were
		 * rejected above.
		 */
		err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
		if (err)
			return err;
		err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num,
						is_l3);
		if (err)
			return err;
	} else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
		/* Map the attribute name to its register offset. */
		offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
						 attr->attr.name);
		if (offset < 0)
			return -EINVAL;
		err = mlxbf_pmc_write_reg(blk_num, offset, data);
		if (err)
			return err;
	} else
		return -EINVAL;

	return count;
}
  917. /* Show function for "event" sysfs files */
  918. static ssize_t mlxbf_pmc_event_show(struct device *dev,
  919. struct device_attribute *attr, char *buf)
  920. {
  921. struct mlxbf_pmc_attribute *attr_event = container_of(
  922. attr, struct mlxbf_pmc_attribute, dev_attr);
  923. int blk_num, cnt_num, err;
  924. bool is_l3 = false;
  925. uint64_t evt_num;
  926. char *evt_name;
  927. blk_num = attr_event->nr;
  928. cnt_num = attr_event->index;
  929. if (strstr(pmc->block_name[blk_num], "l3cache"))
  930. is_l3 = true;
  931. err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
  932. if (err)
  933. return sysfs_emit(buf, "No event being monitored\n");
  934. evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
  935. if (!evt_name)
  936. return -EINVAL;
  937. return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
  938. }
  939. /* Store function for "event" sysfs files */
  940. static ssize_t mlxbf_pmc_event_store(struct device *dev,
  941. struct device_attribute *attr,
  942. const char *buf, size_t count)
  943. {
  944. struct mlxbf_pmc_attribute *attr_event = container_of(
  945. attr, struct mlxbf_pmc_attribute, dev_attr);
  946. int blk_num, cnt_num, evt_num, err;
  947. bool is_l3 = false;
  948. blk_num = attr_event->nr;
  949. cnt_num = attr_event->index;
  950. if (isalpha(buf[0])) {
  951. evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
  952. buf);
  953. if (evt_num < 0)
  954. return -EINVAL;
  955. } else {
  956. err = kstrtoint(buf, 0, &evt_num);
  957. if (err < 0)
  958. return err;
  959. }
  960. if (strstr(pmc->block_name[blk_num], "l3cache"))
  961. is_l3 = true;
  962. err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num, is_l3);
  963. if (err)
  964. return err;
  965. return count;
  966. }
  967. /* Show function for "event_list" sysfs files */
  968. static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
  969. struct device_attribute *attr,
  970. char *buf)
  971. {
  972. struct mlxbf_pmc_attribute *attr_event_list = container_of(
  973. attr, struct mlxbf_pmc_attribute, dev_attr);
  974. int blk_num, i, size, len = 0, ret = 0;
  975. const struct mlxbf_pmc_events *events;
  976. char e_info[MLXBF_PMC_EVENT_INFO_LEN];
  977. blk_num = attr_event_list->nr;
  978. events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &size);
  979. if (!events)
  980. return -EINVAL;
  981. for (i = 0, buf[0] = '\0'; i < size; ++i) {
  982. len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
  983. events[i].evt_num, events[i].evt_name);
  984. if (len >= PAGE_SIZE)
  985. break;
  986. strcat(buf, e_info);
  987. ret = len;
  988. }
  989. return ret;
  990. }
  991. /* Show function for "enable" sysfs files - only for l3cache */
  992. static ssize_t mlxbf_pmc_enable_show(struct device *dev,
  993. struct device_attribute *attr, char *buf)
  994. {
  995. struct mlxbf_pmc_attribute *attr_enable = container_of(
  996. attr, struct mlxbf_pmc_attribute, dev_attr);
  997. uint32_t perfcnt_cfg;
  998. int blk_num, value;
  999. blk_num = attr_enable->nr;
  1000. if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
  1001. MLXBF_PMC_L3C_PERF_CNT_CFG,
  1002. &perfcnt_cfg))
  1003. return -EINVAL;
  1004. value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
  1005. return sysfs_emit(buf, "%d\n", value);
  1006. }
/*
 * sysfs store handler for "enable" - present only on l3cache blocks.
 *
 * Writing 0 stops the L3C counters; writing 1 issues two config calls in
 * sequence before counting starts, so counters always restart from a
 * known state.  Any other value is rejected.
 *
 * NOTE(review): the two bool arguments of mlxbf_pmc_config_l3_counters()
 * appear to be (enable, reset) given the (false, true) then (true, false)
 * sequence on enable - confirm against that function's definition.
 */
static ssize_t mlxbf_pmc_enable_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct mlxbf_pmc_attribute *attr_enable = container_of(
		attr, struct mlxbf_pmc_attribute, dev_attr);
	int err, en, blk_num;

	blk_num = attr_enable->nr;
	err = kstrtoint(buf, 0, &en);
	if (err < 0)
		return err;

	if (!en) {
		/* Disable counting. */
		err = mlxbf_pmc_config_l3_counters(blk_num, false, false);
		if (err)
			return err;
	} else if (en == 1) {
		/* Two-step enable: configure, then start counting. */
		err = mlxbf_pmc_config_l3_counters(blk_num, false, true);
		if (err)
			return err;
		err = mlxbf_pmc_config_l3_counters(blk_num, true, false);
		if (err)
			return err;
	} else
		return -EINVAL;

	return count;
}
/*
 * Populate attributes for blocks with counters to monitor performance.
 *
 * Fills pmc->block[blk_num].block_attr[] with, in order: one "event_list"
 * file, an optional "enable" file (l3cache blocks only), and then a
 * "counterN"/"eventN" pair for each counter.  Index 'i' tracks the next
 * free slot in block_attr[] and is pre-incremented before each append
 * after the first entry.
 *
 * Returns 0 on success or -ENOMEM if any allocation fails.
 */
static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
{
	struct mlxbf_pmc_attribute *attr;
	int i = 0, j = 0;

	/* "event_list" sysfs to list events supported by the block */
	attr = &pmc->block[blk_num].attr_event_list;
	attr->dev_attr.attr.mode = 0444;
	attr->dev_attr.show = mlxbf_pmc_event_list_show;
	attr->nr = blk_num;
	attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
	if (!attr->dev_attr.attr.name)
		return -ENOMEM;
	/* Slot 0 is always event_list. */
	pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
	attr = NULL;

	/* "enable" sysfs to start/stop the counters. Only in L3C blocks */
	if (strstr(pmc->block_name[blk_num], "l3cache")) {
		attr = &pmc->block[blk_num].attr_enable;
		attr->dev_attr.attr.mode = 0644;
		attr->dev_attr.show = mlxbf_pmc_enable_show;
		attr->dev_attr.store = mlxbf_pmc_enable_store;
		attr->nr = blk_num;
		attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
							  "enable");
		if (!attr->dev_attr.attr.name)
			return -ENOMEM;
		pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
		attr = NULL;
	}

	/* One attribute descriptor per counter, managed by devres. */
	pmc->block[blk_num].attr_counter = devm_kcalloc(
		dev, pmc->block[blk_num].counters,
		sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
	if (!pmc->block[blk_num].attr_counter)
		return -ENOMEM;

	pmc->block[blk_num].attr_event = devm_kcalloc(
		dev, pmc->block[blk_num].counters,
		sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
	if (!pmc->block[blk_num].attr_event)
		return -ENOMEM;

	/* "eventX" and "counterX" sysfs to program and read counter values */
	for (j = 0; j < pmc->block[blk_num].counters; ++j) {
		/* counterN: read/clear the counter value. */
		attr = &pmc->block[blk_num].attr_counter[j];
		attr->dev_attr.attr.mode = 0644;
		attr->dev_attr.show = mlxbf_pmc_counter_show;
		attr->dev_attr.store = mlxbf_pmc_counter_store;
		attr->index = j;
		attr->nr = blk_num;
		attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
							  "counter%d", j);
		if (!attr->dev_attr.attr.name)
			return -ENOMEM;
		pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
		attr = NULL;

		/* eventN: query/program the event being counted. */
		attr = &pmc->block[blk_num].attr_event[j];
		attr->dev_attr.attr.mode = 0644;
		attr->dev_attr.show = mlxbf_pmc_event_show;
		attr->dev_attr.store = mlxbf_pmc_event_store;
		attr->index = j;
		attr->nr = blk_num;
		attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
							  "event%d", j);
		if (!attr->dev_attr.attr.name)
			return -ENOMEM;
		pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
		attr = NULL;
	}

	return 0;
}
  1102. /* Populate attributes for blocks with registers to monitor performance */
  1103. static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
  1104. {
  1105. struct mlxbf_pmc_attribute *attr;
  1106. const struct mlxbf_pmc_events *events;
  1107. int i = 0, j = 0;
  1108. events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &j);
  1109. if (!events)
  1110. return -EINVAL;
  1111. pmc->block[blk_num].attr_event = devm_kcalloc(
  1112. dev, j, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
  1113. if (!pmc->block[blk_num].attr_event)
  1114. return -ENOMEM;
  1115. while (j > 0) {
  1116. --j;
  1117. attr = &pmc->block[blk_num].attr_event[j];
  1118. attr->dev_attr.attr.mode = 0644;
  1119. attr->dev_attr.show = mlxbf_pmc_counter_show;
  1120. attr->dev_attr.store = mlxbf_pmc_counter_store;
  1121. attr->nr = blk_num;
  1122. attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
  1123. events[j].evt_name);
  1124. if (!attr->dev_attr.attr.name)
  1125. return -ENOMEM;
  1126. pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
  1127. attr = NULL;
  1128. i++;
  1129. }
  1130. return 0;
  1131. }
  1132. /* Helper to create the bfperf sysfs sub-directories and files */
  1133. static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
  1134. {
  1135. int err;
  1136. /* Populate attributes based on counter type */
  1137. if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER)
  1138. err = mlxbf_pmc_init_perftype_counter(dev, blk_num);
  1139. else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER)
  1140. err = mlxbf_pmc_init_perftype_reg(dev, blk_num);
  1141. else
  1142. err = -EINVAL;
  1143. if (err)
  1144. return err;
  1145. /* Add a new attribute_group for the block */
  1146. pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
  1147. pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
  1148. dev, GFP_KERNEL, pmc->block_name[blk_num]);
  1149. if (!pmc->block[blk_num].block_attr_grp.name)
  1150. return -ENOMEM;
  1151. pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp;
  1152. return 0;
  1153. }
/*
 * Compare an expected GUID against the identifier returned by the SMC UID
 * call.  The 128-bit value comes back packed into res->a0..a3; GUID_INIT
 * re-assembles the individual halfword/byte components by shifting those
 * words, so the shift amounts below encode the packing layout.
 */
static bool mlxbf_pmc_guid_match(const guid_t *guid,
				 const struct arm_smccc_res *res)
{
	guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16, res->a2,
			      res->a2 >> 8, res->a2 >> 16, res->a2 >> 24,
			      res->a3, res->a3 >> 8, res->a3 >> 16,
			      res->a3 >> 24);

	return guid_equal(guid, &id);
}
/* Helper to map the Performance Counters from the various blocks */
static int mlxbf_pmc_map_counters(struct device *dev)
{
	/* Per-block properties: base address, size, counters, type. */
	uint64_t info[MLXBF_PMC_INFO_SZ];
	int i, tile_num, ret;

	for (i = 0; i < pmc->total_blocks; ++i) {
		/* Skip tile blocks beyond the tile count of this part. */
		if (strstr(pmc->block_name[i], "tile")) {
			if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
				return -EINVAL;
			if (tile_num >= pmc->tile_count)
				continue;
		}
		/*
		 * Each block name is also a device property holding
		 * [address, size, counter count, block type].
		 */
		ret = device_property_read_u64_array(dev, pmc->block_name[i],
						     info, MLXBF_PMC_INFO_SZ);
		if (ret)
			return ret;
		/*
		 * Do not remap if the proper SMC calls are supported,
		 * since the SMC calls expect physical addresses.
		 */
		if (pmc->svc_sreg_support)
			pmc->block[i].mmio_base = (void __iomem *)info[0];
		else
			pmc->block[i].mmio_base =
				devm_ioremap(dev, info[0], info[1]);
		pmc->block[i].blk_size = info[1];
		pmc->block[i].counters = info[2];
		pmc->block[i].type = info[3];
		/* NULL here means devm_ioremap failed (or address was 0). */
		if (!pmc->block[i].mmio_base)
			return -ENOMEM;
		ret = mlxbf_pmc_create_groups(dev, i);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Probe: verify the SiP service UUID, detect SMC register-access support,
 * pick the event set from the ACPI HID, map all counter blocks and
 * register the hwmon device that exposes them.  All allocations are
 * devres-managed, so there is no remove-path cleanup here.
 */
static int mlxbf_pmc_probe(struct platform_device *pdev)
{
	struct acpi_device *acpi_dev = ACPI_COMPANION(&pdev->dev);
	const char *hid = acpi_device_hid(acpi_dev);
	struct device *dev = &pdev->dev;
	struct arm_smccc_res res;
	guid_t guid;
	int ret;

	/* Ensure we have the UUID we expect for this service. */
	arm_smccc_smc(MLXBF_PMC_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
	/*
	 * NOTE(review): guid_parse() return value is ignored; the UUID
	 * string is a compile-time constant so it should always parse,
	 * but checking it would be cheap.
	 */
	guid_parse(mlxbf_pmc_svc_uuid_str, &guid);
	if (!mlxbf_pmc_guid_match(&guid, &res))
		return -ENODEV;

	pmc = devm_kzalloc(dev, sizeof(struct mlxbf_pmc_context), GFP_KERNEL);
	if (!pmc)
		return -ENOMEM;

	/*
	 * ACPI indicates whether we use SMCs to access registers or not.
	 * If sreg_tbl_perf is not present, just assume we're not using SMCs.
	 */
	ret = device_property_read_u32(dev, "sec_reg_block",
				       &pmc->sreg_tbl_perf);
	if (ret) {
		pmc->svc_sreg_support = false;
	} else {
		/*
		 * Check service version to see if we actually do support the
		 * needed SMCs. If we have the calls we need, mark support for
		 * them in the pmc struct.
		 */
		arm_smccc_smc(MLXBF_PMC_SIP_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0,
			      &res);
		if (res.a0 == MLXBF_PMC_SVC_REQ_MAJOR &&
		    res.a1 >= MLXBF_PMC_SVC_MIN_MINOR)
			pmc->svc_sreg_support = true;
		else
			return -EINVAL;
	}

	/* The ACPI HID selects which hardware event set applies. */
	if (!strcmp(hid, "MLNXBFD0"))
		pmc->event_set = MLXBF_PMC_EVENT_SET_BF1;
	else if (!strcmp(hid, "MLNXBFD1"))
		pmc->event_set = MLXBF_PMC_EVENT_SET_BF2;
	else
		return -ENODEV;

	ret = device_property_read_u32(dev, "block_num", &pmc->total_blocks);
	if (ret)
		return ret;

	ret = device_property_read_string_array(dev, "block_name",
						pmc->block_name,
						pmc->total_blocks);
	/* Must read exactly total_blocks names; anything else is an error. */
	if (ret != pmc->total_blocks)
		return -EFAULT;

	ret = device_property_read_u32(dev, "tile_num", &pmc->tile_count);
	if (ret)
		return ret;

	pmc->pdev = pdev;

	ret = mlxbf_pmc_map_counters(dev);
	if (ret)
		return ret;

	/* Expose all block attribute groups under a "bfperf" hwmon device. */
	pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
		dev, "bfperf", pmc, pmc->groups);
	if (IS_ERR(pmc->hwmon_dev))
		return PTR_ERR(pmc->hwmon_dev);

	platform_set_drvdata(pdev, pmc);
	return 0;
}
  1265. static const struct acpi_device_id mlxbf_pmc_acpi_ids[] = { { "MLNXBFD0", 0 },
  1266. { "MLNXBFD1", 0 },
  1267. {}, };
  1268. MODULE_DEVICE_TABLE(acpi, mlxbf_pmc_acpi_ids);
  1269. static struct platform_driver pmc_driver = {
  1270. .driver = { .name = "mlxbf-pmc",
  1271. .acpi_match_table = ACPI_PTR(mlxbf_pmc_acpi_ids), },
  1272. .probe = mlxbf_pmc_probe,
  1273. };
  1274. module_platform_driver(pmc_driver);
  1275. MODULE_AUTHOR("Shravan Kumar Ramani <[email protected]>");
  1276. MODULE_DESCRIPTION("Mellanox PMC driver");
  1277. MODULE_LICENSE("Dual BSD/GPL");