llcc_perfmon.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/hrtimer.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/llcc-qcom.h>
#include <linux/module.h>
#include <linux/clk.h>

#include "llcc_events.h"
#include "llcc_perfmon.h"

#define LLCC_PERFMON_NAME	"qcom_llcc_perfmon"
#define MAX_CNTR		16
#define MAX_NUMBER_OF_PORTS	8
#define MAX_FILTERS		16
#define MAX_FILTERS_TYPE	2
#define NUM_CHANNELS		16
#define DELIM_CHAR		" "
#define UNKNOWN_EVENT		255
#define MAX_CLOCK_CNTR		1
#define FEAC_RD_BYTES_FIL0	14
#define FEAC_WR_BEATS_FIL0	17
#define FEAC_RD_BYTES_FIL1	111
#define FEAC_WR_BEATS_FIL1	114
#define BEAC_MC_RD_BEAT_FIL0	24
#define BEAC_MC_WR_BEAT_FIL0	25
#define BEAC_MC_RD_BEAT_FIL1	38
#define BEAC_MC_WR_BEAT_FIL1	39
#define MCPROF_FEAC_FLTR_0	0
#define MCPROF_FEAC_FLTR_1	1
#define MCPROF_BEAC_FLTR_0	2
#define MCPROF_BEAC_FLTR_1	3

/**
 * struct llcc_perfmon_counter_map - llcc perfmon counter map info
 * @port_sel:		Port selected for configured counter
 * @event_sel:		Event selected for configured counter
 * @filter_en:		Filter activation flag for configured counter
 * @filter_sel:		Filter applied for configured counter
 * @counter_dump:	Cumulative counter dump
 */
struct llcc_perfmon_counter_map {
	unsigned int port_sel;
	unsigned int event_sel;
	bool filter_en;
	u8 filter_sel;
	unsigned long long counter_dump[NUM_CHANNELS];
};

struct llcc_perfmon_private;

/**
 * struct event_port_ops - event port operation
 * @event_config:	Counter config support for port & event
 * @event_enable:	Counter enable support for port
 * @event_filter_config: Port filter config support
 */
struct event_port_ops {
	bool (*event_config)(struct llcc_perfmon_private *priv, unsigned int type,
			unsigned int *num, bool enable);
	void (*event_enable)(struct llcc_perfmon_private *priv, bool enable);
	bool (*event_filter_config)(struct llcc_perfmon_private *priv, enum filter_type filter,
			unsigned long long match, unsigned long long mask, bool enable,
			u8 filter_sel);
};

enum fltr_config {
	no_fltr,
	fltr_0_only,
	multiple_filtr,
};

/**
 * struct llcc_perfmon_private - llcc perfmon private
 * @llcc_map:		llcc register address space map
 * @llcc_bcast_map:	llcc broadcast register address space map
 * @bank_off:		Offset of llcc banks
 * @num_banks:		Number of banks supported
 * @port_ops:		struct event_port_ops
 * @configured:		Mapping of configured event counters
 * @configured_cntrs:	Count of configured counters
 * @removed_cntrs:	Count of removed counter configurations
 * @enables_port:	Port enabled for perfmon configuration
 * @filtered_ports:	Port filter enabled
 * @port_filter_sel:	Port filter enabled for Filter0 and Filter1
 * @filters_applied:	List of all filters applied on ports
 * @fltr_logic:		Filter selection logic to check if Filter 0 applied
 * @port_configd:	Number of perfmon port configurations supported
 * @mutex:		mutex to protect this structure
 * @hrtimer:		hrtimer instance for timer functionality
 * @expires:		timer expiry time in nanoseconds
 * @num_mc:		number of MCs (memory controllers)
 * @version:		Version information of llcc block
 * @clock:		clock node to enable qdss
 * @clock_enabled:	flag to control profiling enable and disable
 * @drv_ver:		driver version of llcc-qcom
 * @mc_proftag:		Prof tag to MC
 */
struct llcc_perfmon_private {
	struct regmap *llcc_map;
	struct regmap *llcc_bcast_map;
	unsigned int bank_off[NUM_CHANNELS];
	unsigned int num_banks;
	struct event_port_ops *port_ops[MAX_NUMBER_OF_PORTS];
	struct llcc_perfmon_counter_map configured[MAX_CNTR];
	unsigned int configured_cntrs;
	unsigned int removed_cntrs;
	unsigned int enables_port;
	unsigned int filtered_ports;
	unsigned int port_filter_sel[MAX_FILTERS_TYPE];
	u8 filters_applied[MAX_NUMBER_OF_PORTS][MAX_FILTERS][MAX_FILTERS_TYPE];
	enum fltr_config fltr_logic;
	unsigned int port_configd;
	struct mutex mutex;
	struct hrtimer hrtimer;
	ktime_t expires;
	unsigned int num_mc;
	unsigned int version;
	struct clk *clock;
	bool clock_enabled;
	int drv_ver;
	unsigned long mc_proftag;
};

static inline void llcc_bcast_write(struct llcc_perfmon_private *llcc_priv,
		unsigned int offset, uint32_t val)
{
	regmap_write(llcc_priv->llcc_bcast_map, offset, val);
}

static inline void llcc_bcast_read(struct llcc_perfmon_private *llcc_priv,
		unsigned int offset, uint32_t *val)
{
	regmap_read(llcc_priv->llcc_bcast_map, offset, val);
}

static void llcc_bcast_modify(struct llcc_perfmon_private *llcc_priv,
		unsigned int offset, uint32_t val, uint32_t mask)
{
	uint32_t readval;

	llcc_bcast_read(llcc_priv, offset, &readval);
	readval &= ~mask;
	readval |= val & mask;
	llcc_bcast_write(llcc_priv, offset, readval);
}

static void perfmon_counter_dump(struct llcc_perfmon_private *llcc_priv)
{
	struct llcc_perfmon_counter_map *counter_map;
	uint32_t val;
	unsigned int i, j, offset;

	if (!llcc_priv->configured_cntrs)
		return;

	offset = PERFMON_DUMP(llcc_priv->drv_ver);
	llcc_bcast_write(llcc_priv, offset, MONITOR_DUMP);
	for (i = 0; i < llcc_priv->configured_cntrs; i++) {
		counter_map = &llcc_priv->configured[i];
		offset = LLCC_COUNTER_n_VALUE(llcc_priv->drv_ver, i);
		for (j = 0; j < llcc_priv->num_banks; j++) {
			regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j] + offset, &val);
			counter_map->counter_dump[j] += val;
		}
	}
}
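/*
 * Reading the perfmon_counter_dump sysfs node dumps and accumulates all
 * configured counters across banks. Illustrative usage (the exact sysfs path
 * is platform dependent and shown here only as an assumption):
 *
 *   cat /sys/devices/platform/.../llcc_perfmon/perfmon_counter_dump
 *
 * Each configured counter prints as "Port PP,Event EEE,0x<total>"; if the
 * last counter was set up as the clock counter, it prints as "CYCLE COUNT".
 */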
static ssize_t perfmon_counter_dump_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	struct llcc_perfmon_counter_map *counter_map;
	unsigned int i, j, val, offset;
	unsigned long long total;
	ssize_t cnt = 0;

	if (llcc_priv->configured_cntrs == 0) {
		pr_err("counters not configured\n");
		return cnt;
	}

	perfmon_counter_dump(llcc_priv);
	for (i = 0; i < llcc_priv->configured_cntrs; i++) {
		total = 0;
		counter_map = &llcc_priv->configured[i];
		if (counter_map->port_sel == EVENT_PORT_BEAC && llcc_priv->num_mc > 1) {
			/* DBX uses 2 counters for BEAC 0 & 1 */
			i++;
			for (j = 0; j < llcc_priv->num_banks; j++) {
				total += counter_map->counter_dump[j];
				counter_map->counter_dump[j] = 0;
				total += counter_map[1].counter_dump[j];
				counter_map[1].counter_dump[j] = 0;
			}
		} else {
			for (j = 0; j < llcc_priv->num_banks; j++) {
				total += counter_map->counter_dump[j];
				counter_map->counter_dump[j] = 0;
			}
		}

		/* Check if the last counter is configured as the clock cycle counter */
		if (i == llcc_priv->configured_cntrs - 1) {
			offset = PERFMON_COUNTER_n_CONFIG(llcc_priv->drv_ver, i);
			llcc_bcast_read(llcc_priv, offset, &val);
			if (val & COUNT_CLOCK_EVENT) {
				cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "CYCLE COUNT, ,");
				cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "0x%016llx\n", total);
				break;
			}
		}

		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "Port %02d,Event %03d,",
				counter_map->port_sel, counter_map->event_sel);
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "0x%016llx\n", total);
	}

	if (llcc_priv->expires)
		hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires);

	return cnt;
}

static void remove_filters(struct llcc_perfmon_private *llcc_priv)
{
	struct event_port_ops *port_ops;
	u32 i, j, port_filter_sel;
	u8 filter0_applied, filter1_applied;

	/* Capture filtered ports info for filter0 and filter1 */
	port_filter_sel = llcc_priv->port_filter_sel[FILTER_0] |
			llcc_priv->port_filter_sel[FILTER_1];
	if (!port_filter_sel) {
		pr_err("No filter configuration found!\n");
		return;
	}

	for (i = 0; i < MAX_NUMBER_OF_PORTS; i++) {
		/* If no filter is present on this port, check the next port bit */
		if (!(port_filter_sel & (1 << i)))
			continue;

		for (j = 0; j < MAX_FILTERS; j++) {
			filter0_applied = llcc_priv->filters_applied[i][j][FILTER_0];
			filter1_applied = llcc_priv->filters_applied[i][j][FILTER_1];
			port_ops = llcc_priv->port_ops[i];
			if ((!filter0_applied && !filter1_applied) ||
					!port_ops->event_filter_config)
				continue;

			/* Remove FILTER0 configuration if present */
			if (filter0_applied) {
				port_ops->event_filter_config(llcc_priv, filter0_applied, 0, 0,
						false, FILTER_0);
				llcc_priv->filters_applied[i][j][FILTER_0] = UNKNOWN_FILTER;
			}

			/* Remove FILTER1 configuration if present */
			if (filter1_applied) {
				port_ops->event_filter_config(llcc_priv, filter1_applied, 0, 0,
						false, FILTER_1);
				llcc_priv->filters_applied[i][j][FILTER_1] = UNKNOWN_FILTER;
			}
		}
	}

	/* Clear internal info for filters and counters */
	llcc_priv->port_filter_sel[FILTER_0] = 0;
	llcc_priv->port_filter_sel[FILTER_1] = 0;
	llcc_priv->fltr_logic = no_fltr;
	for (i = 0; i < MAX_CNTR; i++)
		llcc_priv->configured[i].filter_en = false;

	pr_info("All Filters removed\n");
}

static void remove_counters(struct llcc_perfmon_private *llcc_priv)
{
	u32 i, offset, val;
	struct event_port_ops *port_ops;
	struct llcc_perfmon_counter_map *counter_map;

	if (!llcc_priv->configured_cntrs) {
		pr_err("Counters are not configured\n");
		return;
	}

	/* Remove the counters configured for ports */
	for (i = 0; i < llcc_priv->configured_cntrs; i++) {
		/* If the last counter is configured as the cycle counter, remove it */
		if (i == llcc_priv->configured_cntrs - 1) {
			offset = PERFMON_COUNTER_n_CONFIG(llcc_priv->drv_ver, i);
			llcc_bcast_read(llcc_priv, offset, &val);
			if (val & COUNT_CLOCK_EVENT) {
				llcc_bcast_write(llcc_priv, offset, 0);
				break;
			}
		}

		counter_map = &llcc_priv->configured[i];
		/* In case the port configuration is already removed, skip */
		if (counter_map->port_sel == MAX_NUMBER_OF_PORTS)
			continue;

		port_ops = llcc_priv->port_ops[counter_map->port_sel];
		port_ops->event_config(llcc_priv, 0, &i, false);
		pr_info("removed counter %2u for event %3u from port %2u\n", i,
				counter_map->event_sel, counter_map->port_sel);
		if ((llcc_priv->enables_port & (1 << counter_map->port_sel)) &&
				port_ops->event_enable)
			port_ops->event_enable(llcc_priv, false);

		llcc_priv->enables_port &= ~(1 << counter_map->port_sel);
		/* Set unknown values for port and event for error handling */
		counter_map->port_sel = MAX_NUMBER_OF_PORTS;
		counter_map->event_sel = UNKNOWN_EVENT;
	}

	llcc_priv->configured_cntrs = 0;
	pr_info("Counters removed\n");

	/* Remove the filters if applied */
	if (llcc_priv->fltr_logic != no_fltr) {
		pr_info("Removing filters\n");
		remove_filters(llcc_priv);
	}
}

static bool find_filter_index(const char *token, u8 *filter_idx)
{
	if (sysfs_streq(token, "FILTER0")) {
		*filter_idx = FILTER_0;
		return true;
	} else if (sysfs_streq(token, "FILTER1")) {
		*filter_idx = FILTER_1;
		return true;
	} else {
		return false;
	}
}
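/*
 * Illustrative write format for the perfmon_configure node (sysfs path is an
 * assumption; port/event numbers come from llcc_events.h):
 *
 *   echo "<port> <event> [<port> <event> ...]" > perfmon_configure
 *   echo "FILTER0 <port> <event> ..." > perfmon_configure
 *
 * The optional leading FILTER0/FILTER1 token is only accepted when a
 * multi-filter configuration was set up via perfmon_filter_config. If a
 * counter is still free after parsing, the last one is programmed as the
 * clock cycle counter.
 */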
static ssize_t perfmon_configure_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	struct event_port_ops *port_ops;
	struct llcc_perfmon_counter_map *counter_map;
	unsigned int j = 0, k;
	unsigned long port_sel, event_sel;
	uint32_t val, offset;
	char *token, *delim = DELIM_CHAR;
	u8 filter_idx = FILTER_0, beac_res_cntrs = 0;
	bool multi_fltr_flag = false;

	mutex_lock(&llcc_priv->mutex);
	if (llcc_priv->configured_cntrs == MAX_CNTR) {
		offset = PERFMON_COUNTER_n_CONFIG(llcc_priv->drv_ver,
				llcc_priv->configured_cntrs - 1);
		llcc_bcast_read(llcc_priv, offset, &val);
		/* If the last counter is the clock counter, allow overwriting it */
		if (!(val & COUNT_CLOCK_EVENT)) {
			pr_err("Counters configured already, remove & try again\n");
			mutex_unlock(&llcc_priv->mutex);
			return -EINVAL;
		}
	}

	/* Check whether an existing counter configuration is present. If so, initialize
	 * the count to the number of configured counters - 1 to overwrite the last
	 * configured cycle counter. The cycle count will be reconfigured on the last
	 * available counter.
	 */
	if (llcc_priv->configured_cntrs)
		j = llcc_priv->configured_cntrs - 1;

	token = strsep((char **)&buf, delim);
	/* Get filter information if provided */
	if (token && strlen(token) == strlen("FILTERX")) {
		if (llcc_priv->fltr_logic != multiple_filtr) {
			pr_err("Error! Multifilter configuration not present\n");
			goto out_configure;
		}

		if (!find_filter_index(token, &filter_idx)) {
			pr_err("Invalid Filter Index, supported are FILTER0/1\n");
			goto out_configure;
		}

		multi_fltr_flag = true;
		token = strsep((char **)&buf, delim);
	}

	while (token != NULL) {
		if (kstrtoul(token, 0, &port_sel))
			break;

		if (port_sel >= llcc_priv->port_configd)
			break;

		/* Check whether the given filter is enabled for the port */
		if (multi_fltr_flag &&
				!(llcc_priv->port_filter_sel[filter_idx] & (1 << port_sel))) {
			pr_err("Filter not configured for given port, removing configurations\n");
			remove_counters(llcc_priv);
			goto out_configure;
		}

		token = strsep((char **)&buf, delim);
		if (token == NULL)
			break;

		if (kstrtoul(token, 0, &event_sel))
			break;

		token = strsep((char **)&buf, delim);
		if (event_sel >= EVENT_NUM_MAX) {
			pr_err("unsupported event num %lu\n", event_sel);
			continue;
		}

		/* If BEAC is being configured, as many counters as there are memory
		 * controllers are needed for the BEAC configuration. Reserve them here.
		 */
		beac_res_cntrs = 0;
		if (port_sel == EVENT_PORT_BEAC)
			beac_res_cntrs = llcc_priv->num_mc;

		if (j == (MAX_CNTR - beac_res_cntrs)) {
			pr_err("All counters already used\n");
			break;
		}

		counter_map = &llcc_priv->configured[j];
		counter_map->port_sel = port_sel;
		counter_map->event_sel = event_sel;
		if (multi_fltr_flag) {
			counter_map->filter_en = true;
			counter_map->filter_sel = filter_idx;
		}

		for (k = 0; k < llcc_priv->num_banks; k++)
			counter_map->counter_dump[k] = 0;

		port_ops = llcc_priv->port_ops[port_sel];
		/* If any perfmon configuration fails, remove the existing configurations */
		if (!port_ops->event_config(llcc_priv, event_sel, &j, true)) {
			llcc_priv->configured_cntrs = ++j;
			remove_counters(llcc_priv);
			goto out_configure;
		}

		pr_info("counter %2u configured for event %3lu from port %lu\n", j++, event_sel,
				port_sel);
		if (((llcc_priv->enables_port & (1 << port_sel)) == 0) && port_ops->event_enable)
			port_ops->event_enable(llcc_priv, true);

		llcc_priv->enables_port |= (1 << port_sel);
	}

	if (!j) {
		pr_err("Port/Event number not provided, counters not configured\n");
		goto out_configure;
	}

	/* Configure the cycle counter as the last one if any counter is available */
	if (j < MAX_CNTR) {
		val = COUNT_CLOCK_EVENT | CLEAR_ON_ENABLE | CLEAR_ON_DUMP;
		offset = PERFMON_COUNTER_n_CONFIG(llcc_priv->drv_ver, j++);
		llcc_bcast_write(llcc_priv, offset, val);
	}

	llcc_priv->configured_cntrs = j;

out_configure:
	mutex_unlock(&llcc_priv->mutex);
	return count;
}
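/*
 * Illustrative write format for the perfmon_remove node (path assumed, as
 * above). Either remove everything at once, or remove counters in the order
 * they were configured, using the same port/event pairs:
 *
 *   echo "REMOVE" > perfmon_remove
 *   echo "<port> <event> [<port> <event> ...]" > perfmon_remove
 *   echo "FILTER1 <port> <event> ..." > perfmon_remove
 */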
static ssize_t perfmon_remove_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	struct event_port_ops *port_ops;
	struct llcc_perfmon_counter_map *counter_map;
	unsigned int j = 0, val;
	unsigned long port_sel, event_sel;
	char *token, *delim = DELIM_CHAR;
	uint32_t offset;
	u8 filter_idx = FILTER_0, beac_res_cntrs = 0;
	bool multi_fltr_flag = false, filter_en = false;

	mutex_lock(&llcc_priv->mutex);
	if (!llcc_priv->configured_cntrs) {
		pr_err("Counters not configured\n");
		mutex_unlock(&llcc_priv->mutex);
		return -EINVAL;
	}

	/* If counters were removed earlier, set the count value to the next counter */
	if (llcc_priv->removed_cntrs && llcc_priv->removed_cntrs < llcc_priv->configured_cntrs)
		j = llcc_priv->removed_cntrs;

	token = strsep((char **)&buf, delim);
	/* Case for removing all the counters at once */
	if (token && sysfs_streq(token, "REMOVE")) {
		pr_info("Removing all configured counters and filters\n");
		remove_counters(llcc_priv);
		goto out_remove_store;
	}

	/* Get filter information if provided */
	if (token && strlen(token) == strlen("FILTERX")) {
		if (llcc_priv->fltr_logic != multiple_filtr) {
			pr_err("Error! Multifilter configuration not present\n");
			goto out_remove_store_err;
		}

		if (!find_filter_index(token, &filter_idx)) {
			pr_err("Error! Invalid Filter Index, supported are FILTER0/1\n");
			goto out_remove_store_err;
		}

		multi_fltr_flag = true;
		token = strsep((char **)&buf, delim);
	}

	while (token != NULL) {
		if (kstrtoul(token, 0, &port_sel))
			break;

		if (port_sel >= llcc_priv->port_configd)
			break;

		/* Counter mapping for the last removed counter */
		counter_map = &llcc_priv->configured[j];
		/* Get filter activation status for the given port and filter type 0/1 */
		if (counter_map->filter_en)
			filter_en = llcc_priv->port_filter_sel[counter_map->filter_sel] &
					(1 << port_sel);

		/* When the multi-filter format is used to remove a perfmon configuration,
		 * check that the given port matches the port configured on the current
		 * counter, and that a filter of the same type (FILTER0/FILTER1) is
		 * enabled on that counter for the given port.
		 */
		if (counter_map->port_sel == port_sel) {
			if (multi_fltr_flag && !filter_en) {
				pr_err("Error! filter not present counter:%u for port:%lu\n", j,
						port_sel);
				goto out_remove_store_err;
			} else if (!multi_fltr_flag && filter_en) {
				pr_err("Error! Filter is present on counter:%u for port:%lu\n", j,
						port_sel);
				goto out_remove_store_err;
			}
		} else {
			pr_err("Error! Given port %lu is not configured on counter %u\n", port_sel,
					j);
			goto out_remove_store_err;
		}

		token = strsep((char **)&buf, delim);
		if (token == NULL)
			break;

		if (kstrtoul(token, 0, &event_sel))
			break;

		token = strsep((char **)&buf, delim);
		if (event_sel >= EVENT_NUM_MAX) {
			pr_err("unsupported event num %lu\n", event_sel);
			continue;
		}

		beac_res_cntrs = 0;
		if (port_sel == EVENT_PORT_BEAC)
			beac_res_cntrs = llcc_priv->num_mc;

		if (j == (llcc_priv->configured_cntrs - beac_res_cntrs))
			break;

		/* put dummy values */
		counter_map = &llcc_priv->configured[j];
		counter_map->port_sel = MAX_NUMBER_OF_PORTS;
		counter_map->event_sel = UNKNOWN_EVENT;
		port_ops = llcc_priv->port_ops[port_sel];
		port_ops->event_config(llcc_priv, event_sel, &j, false);
		llcc_priv->removed_cntrs++;
		pr_info("removed counter %2u for event %3lu from port %2lu\n", j++, event_sel,
				port_sel);
		if ((llcc_priv->enables_port & (1 << port_sel)) && port_ops->event_enable)
			port_ops->event_enable(llcc_priv, false);

		llcc_priv->enables_port &= ~(1 << port_sel);
	}

	/* If the count has reached the last counter, check whether that counter is used
	 * as the cycle counter and remove it as well.
	 */
	if (j == (llcc_priv->configured_cntrs - 1)) {
		offset = PERFMON_COUNTER_n_CONFIG(llcc_priv->drv_ver, j);
		llcc_bcast_read(llcc_priv, offset, &val);
		if (val & COUNT_CLOCK_EVENT) {
			llcc_bcast_write(llcc_priv, offset, 0);
			j++;
		}
	}

	if (j == llcc_priv->configured_cntrs) {
		pr_info("All counters removed\n");
		llcc_priv->configured_cntrs = 0;
		llcc_priv->removed_cntrs = 0;
	}

out_remove_store:
	mutex_unlock(&llcc_priv->mutex);
	return count;

out_remove_store_err:
	remove_counters(llcc_priv);
	mutex_unlock(&llcc_priv->mutex);
	return -EINVAL;
}
static enum filter_type find_filter_type(char *filter)
{
	enum filter_type ret = UNKNOWN_FILTER;

	if (!strcmp(filter, "SCID"))
		ret = SCID;
	else if (!strcmp(filter, "MID"))
		ret = MID;
	else if (!strcmp(filter, "PROFILING_TAG"))
		ret = PROFILING_TAG;
	else if (!strcmp(filter, "WAY_ID"))
		ret = WAY_ID;
	else if (!strcmp(filter, "OPCODE"))
		ret = OPCODE;
	else if (!strcmp(filter, "CACHEALLOC"))
		ret = CACHEALLOC;
	else if (!strcmp(filter, "MEMTAGOPS"))
		ret = MEMTAGOPS;
	else if (!strcmp(filter, "MULTISCID"))
		ret = MULTISCID;
	else if (!strcmp(filter, "DIRTYINFO"))
		ret = DIRTYINFO;
	else if (!strcmp(filter, "ADDR_MASK"))
		ret = ADDR_MASK;

	return ret;
}
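/*
 * Illustrative write format for the perfmon_filter_config node (path and
 * example values assumed): a filter type from find_filter_type() above, a
 * match value, a mask, one or more port numbers, and an optional trailing
 * FILTER0/FILTER1 token that switches to multi-filter mode:
 *
 *   echo "SCID 5 0x1f 0" > perfmon_filter_config
 *   echo "MID 0x10 0xff 0 1 FILTER1" > perfmon_filter_config
 *
 * Without the trailing token, the configuration defaults to Filter 0 only.
 */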
static ssize_t perfmon_filter_config_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	unsigned long long mask, match;
	unsigned long port, port_filter_en = 0;
	struct event_port_ops *port_ops;
	char *token, *delim = DELIM_CHAR;
	enum filter_type fil_applied = UNKNOWN_FILTER;
	u8 filter_idx = FILTER_0, i;
	bool filter_status;

	if (llcc_priv->configured_cntrs) {
		pr_err("remove configured events and try\n");
		return count;
	}

	mutex_lock(&llcc_priv->mutex);
	token = strsep((char **)&buf, delim);
	if (token != NULL)
		fil_applied = find_filter_type(token);

	if (fil_applied == UNKNOWN_FILTER) {
		pr_err("filter configuration failed, Unsupported filter\n");
		goto filter_config_free;
	}

	token = strsep((char **)&buf, delim);
	if (token == NULL) {
		pr_err("filter configuration failed, Wrong input\n");
		goto filter_config_free;
	}

	if (kstrtoull(token, 0, &match)) {
		pr_err("filter configuration failed, Wrong format\n");
		goto filter_config_free;
	}

	if ((fil_applied == SCID) && (match >= SCID_MAX)) {
		pr_err("filter configuration failed, SCID above MAX value\n");
		goto filter_config_free;
	}

	token = strsep((char **)&buf, delim);
	if (token == NULL) {
		pr_err("filter configuration failed, Wrong input\n");
		goto filter_config_free;
	}

	if (kstrtoull(token, 0, &mask)) {
		pr_err("filter configuration failed, Wrong format\n");
		goto filter_config_free;
	}

	while (token != NULL) {
		/* With the selective filter enabled, all filter configs are expected
		 * to be in selective mode
		 */
		token = strsep((char **)&buf, delim);
		if (token == NULL) {
			if (llcc_priv->fltr_logic == multiple_filtr) {
				pr_err("Multiple Filter already present, try again\n");
				goto filter_config_free;
			} else {
				break;
			}
		}

		if (find_filter_index(token, &filter_idx)) {
			if (llcc_priv->fltr_logic == fltr_0_only) {
				pr_err("Filter 0 config already present, try again\n");
				goto filter_config_free;
			}

			llcc_priv->fltr_logic = multiple_filtr;
			pr_info("Selective filter configuration selected\n");
			break;
		}

		if (kstrtoul(token, 0, &port)) {
			pr_err("filter configuration failed, Wrong format. Try again!\n");
			goto filter_config_free;
		}

		if (port >= MAX_NUMBER_OF_PORTS) {
			pr_err("filter configuration failed, port number above MAX value\n");
			goto filter_config_free;
		}

		port_filter_en |= 1 << port;
	}

	if (!port_filter_en) {
		pr_err("No port number input for filter config, try again\n");
		goto filter_config_free;
	}

	/* Defaults to the Filter 0 config if multiple filter is not selected */
	if (llcc_priv->fltr_logic != multiple_filtr) {
		llcc_priv->fltr_logic = fltr_0_only;
		pr_info("Using Filter 0 settings\n");
	}

	for (i = 0; i < MAX_NUMBER_OF_PORTS; i++) {
		port_ops = llcc_priv->port_ops[i];
		if (!port_ops->event_filter_config)
			continue;

		if (port_filter_en & (1 << i)) {
			/* Update the applied filter information for the port */
			llcc_priv->filters_applied[i][fil_applied][filter_idx] = fil_applied;
			filter_status = port_ops->event_filter_config(llcc_priv, fil_applied,
					match, mask, true, filter_idx);
			if (!filter_status)
				goto filter_config_free;
		}
	}

	llcc_priv->port_filter_sel[filter_idx] |= port_filter_en;
	mutex_unlock(&llcc_priv->mutex);
	return count;

filter_config_free:
	remove_filters(llcc_priv);
	mutex_unlock(&llcc_priv->mutex);
	return -EINVAL;
}
static void reset_flags(struct llcc_perfmon_private *llcc_priv)
{
	/* Check whether the flags for Filter 0 alone or for multiple filters can be
	 * cleared, starting with whether the port configuration for FILTER0 is clear.
	 */
	if (!llcc_priv->port_filter_sel[FILTER_0]) {
		/* Remove multiple filter if set and the port configuration for FILTER1 is clear */
		if (!llcc_priv->port_filter_sel[FILTER_1] &&
				llcc_priv->fltr_logic == multiple_filtr)
			llcc_priv->fltr_logic = no_fltr;
		/* Remove Filter 0 alone if set and the selective filter is unset */
		else if (llcc_priv->fltr_logic == fltr_0_only)
			llcc_priv->fltr_logic = no_fltr;
	}
}
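/*
 * Illustrative write format for the perfmon_filter_remove node (path
 * assumed): either "REMOVE" to drop every applied filter, or the same
 * format used for perfmon_filter_config:
 *
 *   echo "REMOVE" > perfmon_filter_remove
 *   echo "SCID 5 0x1f 0" > perfmon_filter_remove
 */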
static ssize_t perfmon_filter_remove_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	struct event_port_ops *port_ops;
	unsigned long long mask, match;
	unsigned long port, port_filter_en = 0;
	char *token, *delim = DELIM_CHAR;
	enum filter_type fil_applied = UNKNOWN_FILTER;
	u8 filter_idx = FILTER_0, i, j;

	if (llcc_priv->fltr_logic == no_fltr) {
		pr_err("Filters are not applied\n");
		return count;
	}

	if (llcc_priv->configured_cntrs) {
		pr_err("remove configured events and try\n");
		return count;
	}

	mutex_lock(&llcc_priv->mutex);
	token = strsep((char **)&buf, delim);
	if (token && sysfs_streq(token, "REMOVE")) {
		remove_filters(llcc_priv);
		goto filter_remove_free;
	}

	if (token)
		fil_applied = find_filter_type(token);

	if (fil_applied == UNKNOWN_FILTER) {
		pr_err("filter configuration failed, Unsupported filter\n");
		goto filter_remove_free;
	}

	token = strsep((char **)&buf, delim);
	if (token == NULL) {
		pr_err("filter configuration failed, Wrong input\n");
		goto filter_remove_free;
	}

	if (kstrtoull(token, 0, &match)) {
		pr_err("filter configuration failed, Wrong format\n");
		goto filter_remove_free;
	}

	if (fil_applied == SCID && match >= SCID_MAX) {
		pr_err("filter configuration failed, SCID above MAX value\n");
		goto filter_remove_free;
	}

	token = strsep((char **)&buf, delim);
	if (token == NULL) {
		pr_err("filter configuration failed, Wrong input\n");
		goto filter_remove_free;
	}

	if (kstrtoull(token, 0, &mask)) {
		pr_err("filter configuration failed, Wrong format\n");
		goto filter_remove_free;
	}

	while (token != NULL) {
		token = strsep((char **)&buf, delim);
		if (token == NULL) {
			/* Filter 0 config rejected as a multiple filter config is already present */
			if (llcc_priv->fltr_logic == multiple_filtr) {
				pr_err("Mismatch! Selective configuration present\n");
				goto filter_remove_free;
			} else {
				break;
			}
		}

		if (find_filter_index(token, &filter_idx)) {
			/* Multiple filter logic rejected as Filter 0 alone is already present */
			if (llcc_priv->fltr_logic == fltr_0_only) {
				pr_err("Mismatch! Filter 0 alone configuration present\n");
				goto filter_remove_free;
			}

			break;
		}

		if (kstrtoul(token, 0, &port))
			break;

		if (port >= MAX_NUMBER_OF_PORTS) {
			pr_err("filter configuration failed, port number above MAX value\n");
			goto filter_remove_free;
		}

		/* Update the bit field for the filtered port */
		port_filter_en |= 1 << port;
	}

	for (i = 0; i < MAX_NUMBER_OF_PORTS; i++) {
		if (!(port_filter_en & (1 << i)))
			continue;

		port_ops = llcc_priv->port_ops[i];
		if (!port_ops->event_filter_config)
			continue;

		port_ops->event_filter_config(llcc_priv, fil_applied, 0, 0, false, filter_idx);
		llcc_priv->filters_applied[i][fil_applied][filter_idx] = UNKNOWN_FILTER;
		/* Check if any filter is still present on the given port */
		for (j = 0; j < MAX_FILTERS; j++)
			if (llcc_priv->filters_applied[i][j][filter_idx])
				break;

		/* Clear the port filter enable bit if all filter fields are UNKNOWN for
		 * the port; the same is used to clear the global filter flag in llcc_priv.
		 */
		if (j == MAX_FILTERS) {
			port_filter_en &= ~(1 << i);
			llcc_priv->port_filter_sel[filter_idx] &= ~(1 << i);
		}
	}

	reset_flags(llcc_priv);

filter_remove_free:
	mutex_unlock(&llcc_priv->mutex);
	return count;
}
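/*
 * Illustrative usage of the perfmon_start node (path assumed). Counters must
 * be configured first; a nonzero value starts the monitor (enabling the QDSS
 * clock when one is present), zero stops it:
 *
 *   echo 1 > perfmon_start
 *   echo 0 > perfmon_start
 */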
static ssize_t perfmon_start_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	uint32_t val = 0, mask_val, offset, cntr_num = DUMP_NUM_COUNTERS_MASK;
	unsigned long start;
	int ret = 0;

	if (kstrtoul(buf, 0, &start))
		return -EINVAL;

	if (!llcc_priv->configured_cntrs) {
		pr_err("Perfmon not configured\n");
		return -EINVAL;
	}

	mutex_lock(&llcc_priv->mutex);
	if (start) {
		if (llcc_priv->clock) {
			ret = clk_prepare_enable(llcc_priv->clock);
			if (ret) {
				mutex_unlock(&llcc_priv->mutex);
				pr_err("clock not enabled\n");
				return -EINVAL;
			}

			llcc_priv->clock_enabled = true;
		}

		val = MANUAL_MODE | MONITOR_EN;
		val &= ~DUMP_SEL;
		if (llcc_priv->expires) {
			if (hrtimer_is_queued(&llcc_priv->hrtimer))
				hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires);
			else
				hrtimer_start(&llcc_priv->hrtimer, llcc_priv->expires,
						HRTIMER_MODE_REL_PINNED);
		}

		cntr_num = (((llcc_priv->configured_cntrs - 1) & DUMP_NUM_COUNTERS_MASK) <<
				DUMP_NUM_COUNTERS_SHIFT);
	} else {
		if (llcc_priv->expires)
			hrtimer_cancel(&llcc_priv->hrtimer);
	}

	mask_val = PERFMON_MODE_MONITOR_MODE_MASK | PERFMON_MODE_MONITOR_EN_MASK |
			PERFMON_MODE_DUMP_SEL_MASK;
	offset = PERFMON_MODE(llcc_priv->drv_ver);
	/* Ensure the register write that stops perfmon only happens if the clock is
	 * already prepared.
	 */
	if (llcc_priv->clock) {
		if (llcc_priv->clock_enabled) {
			llcc_bcast_modify(llcc_priv, offset, val, mask_val);
			if (!start) {
				clk_disable_unprepare(llcc_priv->clock);
				llcc_priv->clock_enabled = false;
			}
		}
	} else {
		/* For RUMI environments where the clock node is not available */
		llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	}

	/* Update the total counters to dump, based on the configured counters */
	offset = PERFMON_NUM_CNTRS_DUMP_CFG(llcc_priv->drv_ver);
	llcc_bcast_write(llcc_priv, offset, cntr_num);
	mutex_unlock(&llcc_priv->mutex);
	return count;
}
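/*
 * Illustrative usage of the perfmon_ns_periodic_dump node (path assumed):
 * the written value is the dump period in nanoseconds; writing 0 cancels
 * the periodic hrtimer.
 *
 *   echo 1000000000 > perfmon_ns_periodic_dump
 *   echo 0 > perfmon_ns_periodic_dump
 */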
static ssize_t perfmon_ns_periodic_dump_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);

	if (kstrtos64(buf, 0, &llcc_priv->expires))
		return -EINVAL;

	mutex_lock(&llcc_priv->mutex);
	if (!llcc_priv->expires) {
		hrtimer_cancel(&llcc_priv->hrtimer);
		mutex_unlock(&llcc_priv->mutex);
		return count;
	}

	if (hrtimer_is_queued(&llcc_priv->hrtimer))
		hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires);
	else
		hrtimer_start(&llcc_priv->hrtimer, llcc_priv->expires, HRTIMER_MODE_REL_PINNED);

	mutex_unlock(&llcc_priv->mutex);
	return count;
}
static ssize_t perfmon_scid_status_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
	uint32_t val;
	unsigned int i, j, offset;
	ssize_t cnt = 0;
	unsigned long total;

	for (i = 0; i < SCID_MAX; i++) {
		total = 0;
		offset = TRP_SCID_n_STATUS(i);
		for (j = 0; j < llcc_priv->num_banks; j++) {
			regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j] + offset, &val);
			val = (val & TRP_SCID_STATUS_CURRENT_CAP_MASK) >>
					TRP_SCID_STATUS_CURRENT_CAP_SHIFT;
			total += val;
		}

		llcc_bcast_read(llcc_priv, offset, &val);
		if (val & TRP_SCID_STATUS_ACTIVE_MASK)
			cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "SCID %02d %10s", i, "ACTIVE");
		else
			cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
					"SCID %02d %10s", i, "DEACTIVE");

		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, ",0x%08lx\n", total);
	}

	return cnt;
}
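/*
 * Illustrative usage of the perfmon_beac_mc_proftag node (path assumed). The
 * written value selects the profiling tag routed to the memory controller,
 * per the MCPROF_* defines above: 0 = FEAC filter 0, 1 = FEAC filter 1,
 * 2 = BEAC filter 0, 3 = BEAC filter 1. If the requested filter is not
 * enabled on the port, the tag falls back to BEAC filter 0.
 *
 *   echo 1 > perfmon_beac_mc_proftag
 */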
static ssize_t perfmon_beac_mc_proftag_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);

	if (kstrtoul(buf, 0, &llcc_priv->mc_proftag))
		return -EINVAL;

	mutex_lock(&llcc_priv->mutex);
	switch (llcc_priv->mc_proftag) {
	case MCPROF_FEAC_FLTR_0:
		if (llcc_priv->port_filter_sel[0] & (1 << EVENT_PORT_FEAC)) {
			llcc_priv->mc_proftag = MCPROF_FEAC_FLTR_0;
		} else {
			llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_0;
			pr_err("FEAC Filter 0 not enabled, defaulting to BEAC FILTER 0\n");
		}

		break;
	case MCPROF_FEAC_FLTR_1:
		if (llcc_priv->port_filter_sel[1] & (1 << EVENT_PORT_FEAC)) {
			llcc_priv->mc_proftag = MCPROF_FEAC_FLTR_1;
		} else {
			llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_0;
			pr_err("FEAC Filter 1 not enabled, defaulting to BEAC FILTER 0\n");
		}

		break;
	case MCPROF_BEAC_FLTR_0:
		if (llcc_priv->port_filter_sel[0] & (1 << EVENT_PORT_BEAC)) {
			llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_0;
		} else {
			llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_0;
			pr_err("BEAC Filter 0 not enabled, defaulting to BEAC FILTER 0\n");
		}

		break;
	case MCPROF_BEAC_FLTR_1:
		if (llcc_priv->port_filter_sel[1] & (1 << EVENT_PORT_BEAC)) {
			llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_1;
		} else {
			llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_0;
			pr_err("BEAC Filter 1 not enabled, defaulting to BEAC FILTER 0\n");
		}

		break;
	}

	mutex_unlock(&llcc_priv->mutex);
	return count;
}
static DEVICE_ATTR_RO(perfmon_counter_dump);
static DEVICE_ATTR_WO(perfmon_configure);
static DEVICE_ATTR_WO(perfmon_remove);
static DEVICE_ATTR_WO(perfmon_filter_config);
static DEVICE_ATTR_WO(perfmon_filter_remove);
static DEVICE_ATTR_WO(perfmon_start);
static DEVICE_ATTR_RO(perfmon_scid_status);
static DEVICE_ATTR_WO(perfmon_ns_periodic_dump);
static DEVICE_ATTR_WO(perfmon_beac_mc_proftag);

static struct attribute *llcc_perfmon_attrs[] = {
	&dev_attr_perfmon_counter_dump.attr,
	&dev_attr_perfmon_configure.attr,
	&dev_attr_perfmon_remove.attr,
	&dev_attr_perfmon_filter_config.attr,
	&dev_attr_perfmon_filter_remove.attr,
	&dev_attr_perfmon_start.attr,
	&dev_attr_perfmon_scid_status.attr,
	&dev_attr_perfmon_ns_periodic_dump.attr,
	&dev_attr_perfmon_beac_mc_proftag.attr,
	NULL,
};

static struct attribute_group llcc_perfmon_group = {
	.attrs = llcc_perfmon_attrs,
};
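/*
 * Note (an interpretation of the code below, not from the original source):
 * each counter's EVENT_SELECT field points at the per-port event mux slot,
 * and the port-specific *_PROF_EVENT_n_CFG registers are programmed using
 * the same counter index, which is why counter_num is written into the
 * EVENT_SELECT field here.
 */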
static void perfmon_cntr_config(struct llcc_perfmon_private *llcc_priv, unsigned int port,
		unsigned int counter_num, bool enable)
{
	uint32_t val = 0, offset;

	if (counter_num >= MAX_CNTR)
		return;

	if (enable)
		val = (port & PERFMON_PORT_SELECT_MASK) |
				((counter_num << EVENT_SELECT_SHIFT) & PERFMON_EVENT_SELECT_MASK) |
				CLEAR_ON_ENABLE | CLEAR_ON_DUMP;

	offset = PERFMON_COUNTER_n_CONFIG(llcc_priv->drv_ver, counter_num);
	llcc_bcast_write(llcc_priv, offset, val);
}
static bool feac_event_config(struct llcc_perfmon_private *llcc_priv, unsigned int event_type,
		unsigned int *counter_num, bool enable)
{
	uint32_t val = 0, mask_val, offset;
	u8 filter_en, filter_sel = FILTER_0;

	filter_en = llcc_priv->port_filter_sel[filter_sel] & (1 << EVENT_PORT_FEAC);
	if (llcc_priv->fltr_logic == multiple_filtr) {
		filter_en = llcc_priv->configured[*counter_num].filter_en;
		filter_sel = llcc_priv->configured[*counter_num].filter_sel;
	}

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->version >= REV_2) {
		mask_val = EVENT_SEL_MASK7;
		if (llcc_priv->version == REV_5)
			mask_val = EVENT_SEL_MASK8;
	}

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & mask_val;
		if (filter_en) {
			/* For the FEAC read/write beat/byte events, the filter selection
			 * logic must not be applied, as these four events do not use the
			 * FILTER_SEL and FILTER_EN fields from LLCC_*_PROF_EVENT_n_CFG.
			 * Instead, they use exclusive filters that need to be configured
			 * for those specific events.
			 */
			if (((event_type >= FEAC_RD_BYTES_FIL0 &&
					event_type <= FEAC_WR_BEATS_FIL0) && filter_sel == FILTER_1) ||
					((event_type >= FEAC_RD_BYTES_FIL1 &&
					event_type <= FEAC_WR_BEATS_FIL1) && filter_sel == FILTER_0)) {
				pr_err("Invalid configuration for FEAC, removing\n");
				return false;
			} else if (!(event_type >= FEAC_RD_BYTES_FIL0 &&
					event_type <= FEAC_WR_BEATS_FIL0) &&
					!(event_type >= FEAC_RD_BYTES_FIL1 &&
					event_type <= FEAC_WR_BEATS_FIL1)) {
				val |= (filter_sel << FILTER_SEL_SHIFT) | FILTER_EN;
			}
		}
	}

	if (filter_en)
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	offset = FEAC_PROF_EVENT_n_CFG(llcc_priv->drv_ver, *counter_num);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_FEAC, *counter_num, enable);
	return true;
}

static void feac_event_enable(struct llcc_perfmon_private *llcc_priv, bool enable)
{
	uint32_t val = 0, val_cfg1 = 0, mask_val = 0, offset;
	bool prof_cfg_filter = false, prof_cfg1_filter1 = false;

	prof_cfg_filter = llcc_priv->port_filter_sel[FILTER_0] & (1 << EVENT_PORT_FEAC);
	prof_cfg1_filter1 = llcc_priv->port_filter_sel[FILTER_1] & (1 << EVENT_PORT_FEAC);
	val = (BYTE_SCALING << BYTE_SCALING_SHIFT) | (BEAT_SCALING << BEAT_SCALING_SHIFT);
	val_cfg1 = (BYTE_SCALING << BYTE_SCALING_SHIFT) | (BEAT_SCALING << BEAT_SCALING_SHIFT);
	mask_val = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK | PROF_CFG_EN_MASK;
	if (prof_cfg_filter || prof_cfg1_filter1) {
		if (llcc_priv->version == REV_0) {
			mask_val |= FEAC_SCALING_FILTER_SEL_MASK | FEAC_SCALING_FILTER_EN_MASK;
			val |= (FILTER_0 << FEAC_SCALING_FILTER_SEL_SHIFT) |
					FEAC_SCALING_FILTER_EN;
		} else {
			mask_val |= FEAC_WR_BEAT_FILTER_SEL_MASK | FEAC_WR_BEAT_FILTER_EN_MASK |
					FEAC_WR_BYTE_FILTER_SEL_MASK | FEAC_WR_BYTE_FILTER_EN_MASK |
					FEAC_RD_BEAT_FILTER_SEL_MASK | FEAC_RD_BEAT_FILTER_EN_MASK |
					FEAC_RD_BYTE_FILTER_SEL_MASK | FEAC_RD_BYTE_FILTER_EN_MASK;
			val |= FEAC_WR_BEAT_FILTER_EN | FEAC_WR_BYTE_FILTER_EN |
					FEAC_RD_BEAT_FILTER_EN | FEAC_RD_BYTE_FILTER_EN;
			if (prof_cfg_filter && prof_cfg1_filter1) {
				val_cfg1 = val;
				val |= (FILTER_0 << FEAC_WR_BEAT_FILTER_SEL_SHIFT) |
						(FILTER_0 << FEAC_WR_BYTE_FILTER_SEL_SHIFT) |
						(FILTER_0 << FEAC_RD_BEAT_FILTER_SEL_SHIFT) |
						(FILTER_0 << FEAC_RD_BYTE_FILTER_SEL_SHIFT);
				val_cfg1 |= (FILTER_1 << FEAC_WR_BEAT_FILTER_SEL_SHIFT) |
						(FILTER_1 << FEAC_WR_BYTE_FILTER_SEL_SHIFT) |
						(FILTER_1 << FEAC_RD_BEAT_FILTER_SEL_SHIFT) |
						(FILTER_1 << FEAC_RD_BYTE_FILTER_SEL_SHIFT);
			} else if (prof_cfg1_filter1) {
				val |= (FILTER_1 << FEAC_WR_BEAT_FILTER_SEL_SHIFT) |
						(FILTER_1 << FEAC_WR_BYTE_FILTER_SEL_SHIFT) |
						(FILTER_1 << FEAC_RD_BEAT_FILTER_SEL_SHIFT) |
						(FILTER_1 << FEAC_RD_BYTE_FILTER_SEL_SHIFT);
			} else if (prof_cfg_filter) {
				val |= (FILTER_0 << FEAC_WR_BEAT_FILTER_SEL_SHIFT) |
						(FILTER_0 << FEAC_WR_BYTE_FILTER_SEL_SHIFT) |
						(FILTER_0 << FEAC_RD_BEAT_FILTER_SEL_SHIFT) |
						(FILTER_0 << FEAC_RD_BYTE_FILTER_SEL_SHIFT);
			}
		}
	}

	val |= PROF_EN;
	mask_val |= PROF_CFG_EN_MASK;
	if (!enable) {
		val = 0;
		val_cfg1 = 0;
	}

	/* Hardware-version based filtering capabilities: if the cache version is v31 or
	 * higher, filter 0 and filter 1 can be applied on PROF_CFG and PROF_CFG1
	 * respectively. Otherwise, for a single applied filter, only PROF_CFG is used,
	 * for either filter 0 or filter 1.
	 */
	if (llcc_priv->version >= REV_2 && (prof_cfg_filter && prof_cfg1_filter1)) {
		offset = FEAC_PROF_CFG(llcc_priv->drv_ver);
		llcc_bcast_modify(llcc_priv, offset, val, mask_val);
		mask_val &= ~PROF_CFG_EN_MASK;
		offset = FEAC_PROF_CFG1(llcc_priv->drv_ver);
		llcc_bcast_modify(llcc_priv, offset, val_cfg1, mask_val);
	} else {
		offset = FEAC_PROF_CFG(llcc_priv->drv_ver);
		llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	}
}
  1101. static bool feac_event_filter_config(struct llcc_perfmon_private *llcc_priv,
  1102. enum filter_type filter, unsigned long long match, unsigned long long mask,
  1103. bool enable, u8 filter_sel)
  1104. {
  1105. uint64_t val = 0;
  1106. uint32_t mask_val = 0, offset;
  1107. u32 lower_val_mask = 0, lower_val_match = 0, upper_val_match = 0, upper_val_mask = 0;
  1108. u32 lower_offset_match, lower_offset_mask;
  1109. switch (filter) {
  1110. case SCID:
  1111. if (llcc_priv->version == REV_0) {
  1112. if (enable)
  1113. val = (match << SCID_MATCH_SHIFT) | (mask << SCID_MASK_SHIFT);
  1114. mask_val = SCID_MATCH_MASK | SCID_MASK_MASK;
  1115. } else {
  1116. val = SCID_MULTI_MATCH_MASK;
  1117. mask_val = SCID_MULTI_MATCH_MASK;
  1118. if (enable)
  1119. val = (1 << match);
  1120. }
  1121. offset = FEAC_PROF_FILTER_0_CFG6(llcc_priv->drv_ver);
  1122. if (filter_sel)
  1123. offset = FEAC_PROF_FILTER_1_CFG6(llcc_priv->drv_ver);
  1124. break;
  1125. case MULTISCID:
  1126. if (llcc_priv->version != REV_0) {
  1127. /* Clear register for multi scid filter settings */
  1128. val = SCID_MULTI_MATCH_MASK;
  1129. mask_val = SCID_MULTI_MATCH_MASK;
  1130. if (enable)
  1131. val = match;
  1132. }
  1133. offset = FEAC_PROF_FILTER_0_CFG6(llcc_priv->drv_ver);
  1134. if (filter_sel)
  1135. offset = FEAC_PROF_FILTER_1_CFG6(llcc_priv->drv_ver);
  1136. break;
  1137. case MID:
  1138. if (enable)
  1139. val = (match << MID_MATCH_SHIFT) | (mask << MID_MASK_SHIFT);
  1140. mask_val = MID_MATCH_MASK | MID_MASK_MASK;
  1141. offset = FEAC_PROF_FILTER_0_CFG5(llcc_priv->drv_ver);
  1142. if (filter_sel)
  1143. offset = FEAC_PROF_FILTER_1_CFG5(llcc_priv->drv_ver);
  1144. break;
  1145. case OPCODE:
  1146. if (enable)
  1147. val = (match << OPCODE_MATCH_SHIFT) | (mask << OPCODE_MASK_SHIFT);
  1148. mask_val = OPCODE_MATCH_MASK | OPCODE_MASK_MASK;
  1149. offset = FEAC_PROF_FILTER_0_CFG3(llcc_priv->drv_ver);
  1150. if (filter_sel)
  1151. offset = FEAC_PROF_FILTER_1_CFG3(llcc_priv->drv_ver);
  1152. break;
  1153. case CACHEALLOC:
  1154. if (enable)
  1155. val = (match << CACHEALLOC_MATCH_SHIFT) | (mask << CACHEALLOC_MASK_SHIFT);
  1156. mask_val = CACHEALLOC_MATCH_MASK | CACHEALLOC_MASK_MASK;
  1157. offset = FEAC_PROF_FILTER_0_CFG3(llcc_priv->drv_ver);
  1158. if (filter_sel)
  1159. offset = FEAC_PROF_FILTER_1_CFG3(llcc_priv->drv_ver);
  1160. break;
  1161. case MEMTAGOPS:
  1162. if (enable)
  1163. val = (match << MEMTAGOPS_MATCH_SHIFT) | (mask << MEMTAGOPS_MASK_SHIFT);
  1164. mask_val = MEMTAGOPS_MATCH_MASK | MEMTAGOPS_MASK_MASK;
  1165. offset = FEAC_PROF_FILTER_0_CFG7(llcc_priv->drv_ver);
  1166. if (filter_sel)
  1167. offset = FEAC_PROF_FILTER_1_CFG7(llcc_priv->drv_ver);
  1168. break;
  1169. case DIRTYINFO:
  1170. if (enable)
  1171. val = (match << DIRTYINFO_MATCH_SHIFT) | (mask << DIRTYINFO_MASK_SHIFT);
  1172. mask_val = DIRTYINFO_MATCH_MASK | DIRTYINFO_MASK_MASK;
  1173. offset = FEAC_PROF_FILTER_0_CFG7(llcc_priv->drv_ver);
  1174. if (filter_sel)
  1175. offset = FEAC_PROF_FILTER_1_CFG7(llcc_priv->drv_ver);
  1176. break;
  1177. case ADDR_MASK:
  1178. if (enable) {
  1179. lower_val_match = (match & ADDR_LOWER_MASK) << FEAC_ADDR_LOWER_MATCH_SHIFT;
  1180. lower_val_mask = (mask & ADDR_LOWER_MASK) << FEAC_ADDR_LOWER_MASK_SHIFT;
  1181. upper_val_match = (match & ADDR_UPPER_MASK) >> ADDR_UPPER_SHIFT;
  1182. upper_val_mask = (mask & ADDR_UPPER_MASK) >> ADDR_UPPER_SHIFT;
  1183. val = (upper_val_match << FEAC_ADDR_UPPER_MATCH_SHIFT) |
  1184. (upper_val_mask << FEAC_ADDR_UPPER_MASK_SHIFT);
  1185. }
  1186. lower_offset_match = FEAC_PROF_FILTER_0_CFG1(llcc_priv->drv_ver);
  1187. lower_offset_mask = FEAC_PROF_FILTER_0_CFG2(llcc_priv->drv_ver);
  1188. offset = FEAC_PROF_FILTER_0_CFG3(llcc_priv->drv_ver);
  1189. if (filter_sel) {
  1190. lower_offset_match = FEAC_PROF_FILTER_1_CFG1(llcc_priv->drv_ver);
  1191. lower_offset_mask = FEAC_PROF_FILTER_1_CFG2(llcc_priv->drv_ver);
  1192. offset = FEAC_PROF_FILTER_1_CFG3(llcc_priv->drv_ver);
  1193. }
  1194. mask_val = FEAC_ADDR_LOWER_MATCH_MASK;
  1195. llcc_bcast_modify(llcc_priv, lower_offset_match, lower_val_match, mask_val);
  1196. mask_val = FEAC_ADDR_LOWER_MASK_MASK;
  1197. llcc_bcast_modify(llcc_priv, lower_offset_mask, lower_val_mask, mask_val);
  1198. mask_val = FEAC_ADDR_UPPER_MATCH_MASK | FEAC_ADDR_UPPER_MASK_MASK;
  1199. break;
  1200. default:
  1201. pr_err("unknown filter/not supported\n");
  1202. return false;
  1203. }
  1204. llcc_bcast_modify(llcc_priv, offset, val, mask_val);
  1205. return true;
  1206. }
static struct event_port_ops feac_port_ops = {
	.event_config = feac_event_config,
	.event_enable = feac_event_enable,
	.event_filter_config = feac_event_filter_config,
};
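
/*
 * Program the FERC event-select register for the given counter. When a port
 * filter applies (single-filter mode via port_filter_sel, or the per-counter
 * settings in multiple-filter mode), the FILTER_SEL/FILTER_EN fields are
 * programmed as well.
 */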
static bool ferc_event_config(struct llcc_perfmon_private *llcc_priv, unsigned int event_type,
		unsigned int *counter_num, bool enable)
{
	uint32_t val = 0, mask_val, offset;
	u8 filter_en, filter_sel = FILTER_0;

	filter_en = llcc_priv->port_filter_sel[filter_sel] & (1 << EVENT_PORT_FERC);
	if (llcc_priv->fltr_logic == multiple_filtr) {
		filter_en = llcc_priv->configured[*counter_num].filter_en;
		filter_sel = llcc_priv->configured[*counter_num].filter_sel;
	}

	mask_val = EVENT_SEL_MASK;
	if (filter_en)
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (filter_en)
			val |= (filter_sel << FILTER_SEL_SHIFT) | FILTER_EN;
	}

	offset = FERC_PROF_EVENT_n_CFG(llcc_priv->drv_ver, *counter_num);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_FERC, *counter_num, enable);
	return true;
}
static void ferc_event_enable(struct llcc_perfmon_private *llcc_priv, bool enable)
{
	uint32_t val = 0, mask_val, offset;

	if (enable)
		val = (BYTE_SCALING << BYTE_SCALING_SHIFT) | (BEAT_SCALING << BEAT_SCALING_SHIFT) |
				PROF_EN;

	mask_val = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK | PROF_CFG_EN_MASK;
	offset = FERC_PROF_CFG(llcc_priv->drv_ver);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
}
static bool ferc_event_filter_config(struct llcc_perfmon_private *llcc_priv,
		enum filter_type filter, unsigned long long match,
		unsigned long long mask, bool enable, u8 filter_sel)
{
	uint32_t val = 0, mask_val, offset;

	if (filter != PROFILING_TAG) {
		pr_err("unknown filter/not supported\n");
		return false;
	}

	if (enable)
		val = (match << PROFTAG_MATCH_SHIFT) | (mask << PROFTAG_MASK_SHIFT);

	mask_val = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK;
	offset = FERC_PROF_FILTER_0_CFG0(llcc_priv->drv_ver);
	if (filter_sel)
		offset = FERC_PROF_FILTER_1_CFG0(llcc_priv->drv_ver);

	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	return true;
}
static struct event_port_ops ferc_port_ops = {
	.event_config = ferc_event_config,
	.event_enable = ferc_event_enable,
	.event_filter_config = ferc_event_filter_config,
};
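
/*
 * Program the FEWC event-select register and optional filter fields for the
 * given counter. FEWC has no port-level enable, so no event_enable op exists.
 */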
static bool fewc_event_config(struct llcc_perfmon_private *llcc_priv, unsigned int event_type,
		unsigned int *counter_num, bool enable)
{
	uint32_t val = 0, mask_val, offset;
	u8 filter_en, filter_sel = FILTER_0;

	filter_en = llcc_priv->port_filter_sel[filter_sel] & (1 << EVENT_PORT_FEWC);
	if (llcc_priv->fltr_logic == multiple_filtr) {
		filter_en = llcc_priv->configured[*counter_num].filter_en;
		filter_sel = llcc_priv->configured[*counter_num].filter_sel;
	}

	mask_val = EVENT_SEL_MASK;
	if (filter_en)
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (filter_en)
			val |= (filter_sel << FILTER_SEL_SHIFT) | FILTER_EN;
	}

	offset = FEWC_PROF_EVENT_n_CFG(llcc_priv->drv_ver, *counter_num);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_FEWC, *counter_num, enable);
	return true;
}
static bool fewc_event_filter_config(struct llcc_perfmon_private *llcc_priv,
		enum filter_type filter, unsigned long long match,
		unsigned long long mask, bool enable, u8 filter_sel)
{
	uint32_t val = 0, mask_val, offset;

	if (filter != PROFILING_TAG) {
		pr_err("unknown filter/not supported\n");
		return false;
	}

	if (enable)
		val = (match << PROFTAG_MATCH_SHIFT) | (mask << PROFTAG_MASK_SHIFT);

	mask_val = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK;
	offset = FEWC_PROF_FILTER_0_CFG0(llcc_priv->drv_ver);
	if (filter_sel)
		offset = FEWC_PROF_FILTER_1_CFG0(llcc_priv->drv_ver);

	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	return true;
}
static struct event_port_ops fewc_port_ops = {
	.event_config = fewc_event_config,
	.event_filter_config = fewc_event_filter_config,
};
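
/*
 * Configure a BEAC event. BEAC is instantiated once per memory controller, so
 * the event-select register is programmed for every MC instance, and when two
 * MCs are present a second counter slot is consumed for the BEAC1 instance.
 */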
static bool beac_event_config(struct llcc_perfmon_private *llcc_priv, unsigned int event_type,
		unsigned int *counter_num, bool enable)
{
	uint32_t val = 0, mask_val, offset;
	u8 filter_en, filter_sel = FILTER_0;
	unsigned int mc_cnt;
	struct llcc_perfmon_counter_map *counter_map;

	filter_en = llcc_priv->port_filter_sel[filter_sel] & (1 << EVENT_PORT_BEAC);
	if (llcc_priv->fltr_logic == multiple_filtr) {
		filter_en = llcc_priv->configured[*counter_num].filter_en;
		filter_sel = llcc_priv->configured[*counter_num].filter_sel;
	}

	mask_val = EVENT_SEL_MASK;
	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (filter_en) {
			/*
			 * The BEAC read/write beat and byte events do not use the
			 * FILTER_SEL and FILTER_EN fields of LLCC_*_PROF_EVENT_n_CFG.
			 * They use exclusive filters configured per event through
			 * PROF_CFG, PROF_CFG0 and PROF_CFG1, so a filter selection
			 * that does not match the event is rejected here.
			 */
			if (((event_type >= BEAC_MC_RD_BEAT_FIL0 &&
					event_type <= BEAC_MC_WR_BEAT_FIL0) && filter_sel == FILTER_1) ||
					((event_type >= BEAC_MC_RD_BEAT_FIL1 &&
					event_type <= BEAC_MC_WR_BEAT_FIL1) && filter_sel == FILTER_0)) {
				pr_err("Invalid configuration for BEAC, removing\n");
				return false;
			}

			val |= (filter_sel << FILTER_SEL_SHIFT) | FILTER_EN;
		}
	}

	if (filter_en)
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	for (mc_cnt = 0; mc_cnt < llcc_priv->num_mc; mc_cnt++) {
		offset = BEAC0_PROF_EVENT_n_CFG(llcc_priv->drv_ver, *counter_num + mc_cnt) +
				mc_cnt * BEAC_INST_OFF;
		llcc_bcast_modify(llcc_priv, offset, val, mask_val);
		perfmon_cntr_config(llcc_priv, EVENT_PORT_BEAC, *counter_num, enable);
		/* DBX uses two counters, one each for BEAC instance 0 and 1 */
		if (mc_cnt == 1)
			perfmon_cntr_config(llcc_priv, EVENT_PORT_BEAC1, *counter_num + mc_cnt,
					enable);
	}

	/* DBX uses two counters, one each for BEAC instance 0 and 1 */
	if (llcc_priv->num_mc > 1) {
		counter_map = &llcc_priv->configured[(*counter_num)++];
		counter_map->port_sel = MAX_NUMBER_OF_PORTS;
		counter_map->event_sel = UNKNOWN_EVENT;
		if (enable) {
			counter_map->port_sel = EVENT_PORT_BEAC;
			counter_map->event_sel = event_type;
		}
	}

	return true;
}
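
/*
 * Enable or disable BEAC profiling across all memory-controller instances,
 * applying beat/byte scaling and the read/write beat filter selection for
 * whichever of FILTER_0/FILTER_1 is active on this port.
 */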
static void beac_event_enable(struct llcc_perfmon_private *llcc_priv, bool enable)
{
	uint32_t val = 0, val_cfg0 = 0, val_cfg1 = 0, mask_val = 0, mask_val0, mask_val1, offset;
	bool prof_cfg_filter = false, prof_cfg1_filter1 = false;
	unsigned int mc_cnt;

	prof_cfg_filter = llcc_priv->port_filter_sel[FILTER_0] & (1 << EVENT_PORT_BEAC);
	prof_cfg1_filter1 = llcc_priv->port_filter_sel[FILTER_1] & (1 << EVENT_PORT_BEAC);
	val = val_cfg0 = val_cfg1 = (BEAT_SCALING << BEAT_SCALING_SHIFT);
	mask_val = PROF_CFG_BEAT_SCALING_MASK;
	if (prof_cfg_filter || prof_cfg1_filter1) {
		mask_val |= BEAC_WR_BEAT_FILTER_SEL_MASK | BEAC_WR_BEAT_FILTER_EN_MASK |
				BEAC_RD_BEAT_FILTER_SEL_MASK | BEAC_RD_BEAT_FILTER_EN_MASK;
		val |= BEAC_WR_BEAT_FILTER_EN | BEAC_RD_BEAT_FILTER_EN;
		if (prof_cfg1_filter1) {
			val_cfg1 |= (FILTER_1 << BEAC_WR_BEAT_FILTER_SEL_SHIFT) |
					(FILTER_1 << BEAC_RD_BEAT_FILTER_SEL_SHIFT) |
					BEAC_RD_BEAT_FILTER_EN | BEAC_WR_BEAT_FILTER_EN;
		}

		if (prof_cfg_filter) {
			val_cfg0 |= (FILTER_0 << BEAC_WR_BEAT_FILTER_SEL_SHIFT) |
					(FILTER_0 << BEAC_RD_BEAT_FILTER_SEL_SHIFT) |
					BEAC_RD_BEAT_FILTER_EN | BEAC_WR_BEAT_FILTER_EN;
		}
	}

	val |= (BYTE_SCALING << BYTE_SCALING_SHIFT) | PROF_EN;
	mask_val0 = mask_val1 = mask_val;
	mask_val |= PROF_CFG_BYTE_SCALING_MASK | PROF_CFG_EN_MASK | BEAC_MC_PROFTAG_MASK;
	if (!enable)
		val = val_cfg0 = val_cfg1 = 0;

	for (mc_cnt = 0; mc_cnt < llcc_priv->num_mc; mc_cnt++) {
		offset = BEAC0_PROF_CFG0(llcc_priv->drv_ver) + mc_cnt * BEAC_INST_OFF;
		llcc_bcast_modify(llcc_priv, offset, val_cfg0, mask_val0);
		offset = BEAC0_PROF_CFG1(llcc_priv->drv_ver) + mc_cnt * BEAC_INST_OFF;
		llcc_bcast_modify(llcc_priv, offset, val_cfg1, mask_val1);
		offset = BEAC0_PROF_CFG(llcc_priv->drv_ver) + mc_cnt * BEAC_INST_OFF;
		llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	}
}
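
/*
 * Program a BEAC filter (PROFILING_TAG, MID or ADDR_MASK). The address filter
 * splits the match/mask value into lower and upper portions spread across
 * separate CFG registers, and every register write is repeated once per
 * memory-controller instance.
 */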
static bool beac_event_filter_config(struct llcc_perfmon_private *llcc_priv,
		enum filter_type filter, unsigned long long match,
		unsigned long long mask, bool enable, u8 filter_sel)
{
	uint64_t val = 0;
	uint32_t mask_val;
	unsigned int mc_cnt, offset;

	switch (filter) {
	case PROFILING_TAG:
		if (enable)
			val = (match << BEAC_PROFTAG_MATCH_SHIFT) |
					(mask << BEAC_PROFTAG_MASK_SHIFT);

		mask_val = BEAC_PROFTAG_MASK_MASK | BEAC_PROFTAG_MATCH_MASK;
		llcc_priv->mc_proftag = MCPROF_FEAC_FLTR_0;
		if (match == 2)
			llcc_priv->mc_proftag = MCPROF_FEAC_FLTR_1;

		offset = BEAC0_PROF_FILTER_0_CFG5(llcc_priv->drv_ver);
		if (filter_sel)
			offset = BEAC0_PROF_FILTER_1_CFG5(llcc_priv->drv_ver);
		break;
	case MID:
		if (enable)
			val = (match << MID_MATCH_SHIFT) | (mask << MID_MASK_SHIFT);

		mask_val = MID_MATCH_MASK | MID_MASK_MASK;
		llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_0 + filter_sel;
		offset = BEAC0_PROF_FILTER_0_CFG2(llcc_priv->drv_ver);
		if (filter_sel)
			offset = BEAC0_PROF_FILTER_1_CFG2(llcc_priv->drv_ver);
		break;
	case ADDR_MASK:
		if (enable)
			val = (match & ADDR_LOWER_MASK) << BEAC_ADDR_LOWER_MATCH_SHIFT;

		mask_val = BEAC_ADDR_LOWER_MATCH_MASK;
		offset = BEAC0_PROF_FILTER_0_CFG4(llcc_priv->drv_ver);
		if (filter_sel)
			offset = BEAC0_PROF_FILTER_1_CFG4(llcc_priv->drv_ver);

		for (mc_cnt = 0; mc_cnt < llcc_priv->num_mc; mc_cnt++) {
			llcc_bcast_modify(llcc_priv, offset, val, mask_val);
			offset += BEAC_INST_OFF;
		}

		if (enable)
			val = (mask & ADDR_LOWER_MASK) << BEAC_ADDR_LOWER_MASK_SHIFT;

		mask_val = BEAC_ADDR_LOWER_MASK_MASK;
		offset = BEAC0_PROF_FILTER_0_CFG3(llcc_priv->drv_ver);
		if (filter_sel)
			offset = BEAC0_PROF_FILTER_1_CFG3(llcc_priv->drv_ver);

		for (mc_cnt = 0; mc_cnt < llcc_priv->num_mc; mc_cnt++) {
			llcc_bcast_modify(llcc_priv, offset, val, mask_val);
			offset += BEAC_INST_OFF;
		}

		if (enable) {
			match = (match & ADDR_UPPER_MASK) >> ADDR_UPPER_SHIFT;
			mask = (mask & ADDR_UPPER_MASK) >> ADDR_UPPER_SHIFT;
			val = (match << FEAC_ADDR_UPPER_MATCH_SHIFT) |
					(mask << FEAC_ADDR_UPPER_MASK_SHIFT);
		}

		mask_val = BEAC_ADDR_UPPER_MATCH_MASK | BEAC_ADDR_UPPER_MASK_MASK;
		llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_0 + filter_sel;
		offset = BEAC0_PROF_FILTER_0_CFG5(llcc_priv->drv_ver);
		if (filter_sel)
			offset = BEAC0_PROF_FILTER_1_CFG5(llcc_priv->drv_ver);
		break;
	default:
		pr_err("unknown filter/not supported\n");
		return false;
	}

	for (mc_cnt = 0; mc_cnt < llcc_priv->num_mc; mc_cnt++) {
		llcc_bcast_modify(llcc_priv, offset, val, mask_val);
		offset += BEAC_INST_OFF;
	}

	/* On disable, restore mc_proftag to its reset value */
	if (!enable)
		llcc_priv->mc_proftag = MCPROF_BEAC_FLTR_0;

	return true;
}
static struct event_port_ops beac_port_ops = {
	.event_config = beac_event_config,
	.event_enable = beac_event_enable,
	.event_filter_config = beac_event_filter_config,
};
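
/*
 * Program the BERC event-select register and optional filter fields for the
 * given counter.
 */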
static bool berc_event_config(struct llcc_perfmon_private *llcc_priv, unsigned int event_type,
		unsigned int *counter_num, bool enable)
{
	uint64_t val = 0;
	uint32_t mask_val, offset;
	u8 filter_en, filter_sel = FILTER_0;

	filter_en = llcc_priv->port_filter_sel[filter_sel] & (1 << EVENT_PORT_BERC);
	if (llcc_priv->fltr_logic == multiple_filtr) {
		filter_en = llcc_priv->configured[*counter_num].filter_en;
		filter_sel = llcc_priv->configured[*counter_num].filter_sel;
	}

	mask_val = EVENT_SEL_MASK;
	if (filter_en)
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (filter_en)
			val |= (filter_sel << FILTER_SEL_SHIFT) | FILTER_EN;
	}

	offset = BERC_PROF_EVENT_n_CFG(llcc_priv->drv_ver, *counter_num);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_BERC, *counter_num, enable);
	return true;
}
static void berc_event_enable(struct llcc_perfmon_private *llcc_priv, bool enable)
{
	uint32_t val = 0, mask_val, offset;

	if (enable)
		val = (BYTE_SCALING << BYTE_SCALING_SHIFT) | (BEAT_SCALING << BEAT_SCALING_SHIFT) |
				PROF_EN;

	mask_val = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK | PROF_CFG_EN_MASK;
	offset = BERC_PROF_CFG(llcc_priv->drv_ver);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
}
static bool berc_event_filter_config(struct llcc_perfmon_private *llcc_priv,
		enum filter_type filter, unsigned long long match,
		unsigned long long mask, bool enable, u8 filter_sel)
{
	uint32_t val = 0, mask_val, offset;

	if (filter != PROFILING_TAG) {
		pr_err("unknown filter/not supported\n");
		return false;
	}

	if (enable)
		val = (match << PROFTAG_MATCH_SHIFT) | (mask << PROFTAG_MASK_SHIFT);

	mask_val = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK;
	offset = BERC_PROF_FILTER_0_CFG0(llcc_priv->drv_ver);
	if (filter_sel)
		offset = BERC_PROF_FILTER_1_CFG0(llcc_priv->drv_ver);

	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	return true;
}
static struct event_port_ops berc_port_ops = {
	.event_config = berc_event_config,
	.event_enable = berc_event_enable,
	.event_filter_config = berc_event_filter_config,
};
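
/*
 * Configure a TRP event. Revision 2 and later parts use the wider
 * EVENT_SEL_MASK7 event-select field.
 */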
static bool trp_event_config(struct llcc_perfmon_private *llcc_priv, unsigned int event_type,
		unsigned int *counter_num, bool enable)
{
	uint64_t val = 0;
	uint32_t mask_val;
	u8 filter_en, filter_sel = FILTER_0;

	filter_en = llcc_priv->port_filter_sel[filter_sel] & (1 << EVENT_PORT_TRP);
	if (llcc_priv->fltr_logic == multiple_filtr) {
		filter_en = llcc_priv->configured[*counter_num].filter_en;
		filter_sel = llcc_priv->configured[*counter_num].filter_sel;
	}

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->version >= REV_2)
		mask_val = EVENT_SEL_MASK7;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & mask_val;
		if (filter_en)
			val |= (filter_sel << FILTER_SEL_SHIFT) | FILTER_EN;
	}

	if (filter_en)
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	llcc_bcast_modify(llcc_priv, TRP_PROF_EVENT_n_CFG(*counter_num), val, mask_val);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_TRP, *counter_num, enable);
	return true;
}
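
/*
 * Program a TRP filter. SCID filtering uses a multi-match bitmap register on
 * revision 2 and later parts, and a match/mask pair on older ones; MULTISCID
 * is only available from revision 2 onwards.
 */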
static bool trp_event_filter_config(struct llcc_perfmon_private *llcc_priv, enum filter_type filter,
		unsigned long long match, unsigned long long mask, bool enable, u8 filter_sel)
{
	uint64_t val = 0;
	uint32_t mask_val, offset;

	switch (filter) {
	case SCID:
		if (llcc_priv->version >= REV_2) {
			/* Disable writes the full multi-match mask back to the register */
			val = SCID_MULTI_MATCH_MASK;
			if (enable)
				val = (1 << match);

			mask_val = SCID_MULTI_MATCH_MASK;
			offset = TRP_PROF_FILTER_0_CFG2;
			if (filter_sel)
				offset = TRP_PROF_FILTER_1_CFG2;
		} else {
			if (enable)
				val = (match << TRP_SCID_MATCH_SHIFT) |
						(mask << TRP_SCID_MASK_SHIFT);

			mask_val = TRP_SCID_MATCH_MASK | TRP_SCID_MASK_MASK;
			offset = TRP_PROF_FILTER_0_CFG1;
			if (filter_sel)
				offset = TRP_PROF_FILTER_1_CFG1;
		}

		break;
	case MULTISCID:
		if (llcc_priv->version < REV_2) {
			pr_err("unknown filter/not supported\n");
			return false;
		}

		/* Disable writes the full multi-match mask back to the register */
		val = SCID_MULTI_MATCH_MASK;
		if (enable)
			val = match;

		mask_val = SCID_MULTI_MATCH_MASK;
		offset = TRP_PROF_FILTER_0_CFG2;
		if (filter_sel)
			offset = TRP_PROF_FILTER_1_CFG2;
		break;
	case WAY_ID:
		if (enable)
			val = (match << TRP_WAY_ID_MATCH_SHIFT) | (mask << TRP_WAY_ID_MASK_SHIFT);

		mask_val = TRP_WAY_ID_MATCH_MASK | TRP_WAY_ID_MASK_MASK;
		offset = TRP_PROF_FILTER_0_CFG1;
		if (filter_sel)
			offset = TRP_PROF_FILTER_1_CFG1;
		break;
	case PROFILING_TAG:
		if (enable)
			val = (match << TRP_PROFTAG_MATCH_SHIFT) | (mask << TRP_PROFTAG_MASK_SHIFT);

		mask_val = TRP_PROFTAG_MATCH_MASK | TRP_PROFTAG_MASK_MASK;
		offset = TRP_PROF_FILTER_0_CFG1;
		if (filter_sel)
			offset = TRP_PROF_FILTER_1_CFG1;
		break;
	default:
		pr_err("unknown filter/not supported\n");
		return false;
	}

	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	return true;
}
static struct event_port_ops trp_port_ops = {
	.event_config = trp_event_config,
	.event_filter_config = trp_event_filter_config,
};
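
/*
 * Configure a DRP event; revision 2 and later parts use the wider
 * EVENT_SEL_MASK7 event-select field.
 */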
static bool drp_event_config(struct llcc_perfmon_private *llcc_priv, unsigned int event_type,
		unsigned int *counter_num, bool enable)
{
	uint32_t val = 0, mask_val, offset;
	u8 filter_en, filter_sel = FILTER_0;

	filter_en = llcc_priv->port_filter_sel[filter_sel] & (1 << EVENT_PORT_DRP);
	if (llcc_priv->fltr_logic == multiple_filtr) {
		filter_en = llcc_priv->configured[*counter_num].filter_en;
		filter_sel = llcc_priv->configured[*counter_num].filter_sel;
	}

	mask_val = EVENT_SEL_MASK;
	if (llcc_priv->version >= REV_2)
		mask_val = EVENT_SEL_MASK7;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & mask_val;
		if (filter_en)
			val |= (filter_sel << FILTER_SEL_SHIFT) | FILTER_EN;
	}

	if (filter_en)
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	offset = DRP_PROF_EVENT_n_CFG(llcc_priv->drv_ver, *counter_num);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_DRP, *counter_num, enable);
	return true;
}
static void drp_event_enable(struct llcc_perfmon_private *llcc_priv, bool enable)
{
	uint32_t val = 0, mask_val, offset;

	if (enable)
		val = (BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN;

	mask_val = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_EN_MASK;
	offset = DRP_PROF_CFG(llcc_priv->drv_ver);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
}
static struct event_port_ops drp_port_ops = {
	.event_config = drp_event_config,
	.event_enable = drp_event_enable,
};
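
/*
 * Program the PMGR event-select register and optional filter fields for the
 * given counter.
 */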
static bool pmgr_event_config(struct llcc_perfmon_private *llcc_priv, unsigned int event_type,
		unsigned int *counter_num, bool enable)
{
	uint32_t val = 0, mask_val, offset;
	u8 filter_en, filter_sel = FILTER_0;

	filter_en = llcc_priv->port_filter_sel[filter_sel] & (1 << EVENT_PORT_PMGR);
	if (llcc_priv->fltr_logic == multiple_filtr) {
		filter_en = llcc_priv->configured[*counter_num].filter_en;
		filter_sel = llcc_priv->configured[*counter_num].filter_sel;
	}

	mask_val = EVENT_SEL_MASK;
	if (filter_en)
		mask_val |= FILTER_SEL_MASK | FILTER_EN_MASK;

	if (enable) {
		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
		if (filter_en)
			val |= (filter_sel << FILTER_SEL_SHIFT) | FILTER_EN;
	}

	offset = PMGR_PROF_EVENT_n_CFG(llcc_priv->drv_ver, *counter_num);
	llcc_bcast_modify(llcc_priv, offset, val, mask_val);
	perfmon_cntr_config(llcc_priv, EVENT_PORT_PMGR, *counter_num, enable);
	return true;
}
static struct event_port_ops pmgr_port_ops = {
	.event_config = pmgr_event_config,
};
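
/*
 * Record the ops for one event port. Registration beyond MAX_NUMBER_OF_PORTS
 * is rejected.
 */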
static void llcc_register_event_port(struct llcc_perfmon_private *llcc_priv,
		struct event_port_ops *ops, unsigned int event_port_num)
{
	if (llcc_priv->port_configd >= MAX_NUMBER_OF_PORTS) {
		pr_err("Register port failure!\n");
		return;
	}

	llcc_priv->port_configd++;
	llcc_priv->port_ops[event_port_num] = ops;
}
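
/* Periodic dump handler: snapshot all counters and re-arm the hrtimer. */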
static enum hrtimer_restart llcc_perfmon_timer_handler(struct hrtimer *hrtimer)
{
	struct llcc_perfmon_private *llcc_priv = container_of(hrtimer, struct llcc_perfmon_private,
			hrtimer);

	perfmon_counter_dump(llcc_priv);
	hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires);
	return HRTIMER_RESTART;
}
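
/*
 * Probe: map the LLCC regmaps from the parent driver, discover the number of
 * memory controllers and banks from COMMON_STATUS0, register the event ports,
 * set up the dump hrtimer and detect the hardware revision.
 */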
static int llcc_perfmon_probe(struct platform_device *pdev)
{
	int result = 0;
	struct llcc_perfmon_private *llcc_priv;
	struct llcc_drv_data *llcc_driv_data;
	uint32_t val, offset;

	llcc_driv_data = dev_get_drvdata(pdev->dev.parent);
	llcc_priv = devm_kzalloc(&pdev->dev, sizeof(*llcc_priv), GFP_KERNEL);
	if (llcc_priv == NULL)
		return -ENOMEM;

	if (!llcc_driv_data)
		return -ENOMEM;

	if (llcc_driv_data->regmap == NULL || llcc_driv_data->bcast_regmap == NULL)
		return -ENODEV;

	llcc_priv->llcc_map = llcc_driv_data->regmap;
	llcc_priv->llcc_bcast_map = llcc_driv_data->bcast_regmap;
	llcc_priv->drv_ver = llcc_driv_data->llcc_ver;
	offset = LLCC_COMMON_STATUS0(llcc_priv->drv_ver);
	llcc_bcast_read(llcc_priv, offset, &val);
	llcc_priv->num_mc = (val & NUM_MC_MASK) >> NUM_MC_SHIFT;
	/* Default to 1, as some platforms report a count of 0 here */
	if (llcc_priv->num_mc == 0)
		llcc_priv->num_mc = 1;

	llcc_priv->num_banks = (val & LB_CNT_MASK) >> LB_CNT_SHIFT;
	for (val = 0; val < llcc_priv->num_banks; val++)
		llcc_priv->bank_off[val] = llcc_driv_data->offsets[val];

	llcc_priv->clock = devm_clk_get(&pdev->dev, "qdss_clk");
	if (IS_ERR_OR_NULL(llcc_priv->clock)) {
		pr_warn("failed to get qdss clock node\n");
		llcc_priv->clock = NULL;
	}

	result = sysfs_create_group(&pdev->dev.kobj, &llcc_perfmon_group);
	if (result) {
		pr_err("Unable to create sysfs group\n");
		return result;
	}

	mutex_init(&llcc_priv->mutex);
	platform_set_drvdata(pdev, llcc_priv);
	llcc_register_event_port(llcc_priv, &feac_port_ops, EVENT_PORT_FEAC);
	llcc_register_event_port(llcc_priv, &ferc_port_ops, EVENT_PORT_FERC);
	llcc_register_event_port(llcc_priv, &fewc_port_ops, EVENT_PORT_FEWC);
	llcc_register_event_port(llcc_priv, &beac_port_ops, EVENT_PORT_BEAC);
	llcc_register_event_port(llcc_priv, &berc_port_ops, EVENT_PORT_BERC);
	llcc_register_event_port(llcc_priv, &trp_port_ops, EVENT_PORT_TRP);
	llcc_register_event_port(llcc_priv, &drp_port_ops, EVENT_PORT_DRP);
	llcc_register_event_port(llcc_priv, &pmgr_port_ops, EVENT_PORT_PMGR);
	hrtimer_init(&llcc_priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	llcc_priv->hrtimer.function = llcc_perfmon_timer_handler;
	llcc_priv->expires = 0;
	llcc_priv->clock_enabled = false;
	offset = LLCC_COMMON_HW_INFO(llcc_priv->drv_ver);
	llcc_bcast_read(llcc_priv, offset, &val);
	llcc_priv->version = REV_0;
	if (val == LLCC_VERSION_1)
		llcc_priv->version = REV_1;
	else if ((val & MAJOR_VER_MASK) >= LLCC_VERSION_2)
		llcc_priv->version = REV_2;

	if ((val & MAJOR_VER_MASK) == LLCC_VERSION_5)
		llcc_priv->version = REV_5;

	pr_info("Revision <%x.%x.%x>, %d memory controller(s) connected with LLCC\n",
			MAJOR_REV_NO(val), BRANCH_NO(val), MINOR_NO(val), llcc_priv->num_mc);
	return 0;
}
static int llcc_perfmon_remove(struct platform_device *pdev)
{
	struct llcc_perfmon_private *llcc_priv = platform_get_drvdata(pdev);

	while (hrtimer_active(&llcc_priv->hrtimer))
		hrtimer_cancel(&llcc_priv->hrtimer);

	mutex_destroy(&llcc_priv->mutex);
	sysfs_remove_group(&pdev->dev.kobj, &llcc_perfmon_group);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static const struct of_device_id of_match_llcc_perfmon[] = {
	{ .compatible = "qcom,llcc-perfmon", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match_llcc_perfmon);

static struct platform_driver llcc_perfmon_driver = {
	.probe = llcc_perfmon_probe,
	.remove = llcc_perfmon_remove,
	.driver = {
		.name = LLCC_PERFMON_NAME,
		.of_match_table = of_match_llcc_perfmon,
	},
};
module_platform_driver(llcc_perfmon_driver);

MODULE_DESCRIPTION("QCOM LLCC PMU MONITOR");
MODULE_LICENSE("GPL");