adreno_a6xx_perfcounter.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "adreno.h"
#include "adreno_a6xx.h"
#include "adreno_a6xx_hwsched_hfi.h"
#include "adreno_perfcounter.h"
#include "adreno_pm4types.h"
#include "kgsl_device.h"

#define VBIF2_PERF_CNT_SEL_MASK 0x7F
/* offset of clear register from select register */
#define VBIF2_PERF_CLR_REG_SEL_OFF 8
/* offset of enable register from select register */
#define VBIF2_PERF_EN_REG_SEL_OFF 16
/* offset of clear register from the enable register */
#define VBIF2_PERF_PWR_CLR_REG_EN_OFF 8
/* offset of clear register from select register for GBIF */
#define GBIF_PERF_CLR_REG_SEL_OFF 1
/* offset of enable register from select register for GBIF */
#define GBIF_PERF_EN_REG_SEL_OFF 2
/* offset of clear register from the power enable register for GBIF */
#define GBIF_PWR_CLR_REG_EN_OFF 1
#define GBIF_PWR_SEL_REG_EN_OFF 3

#define GBIF_PERF_SEL_RMW_MASK 0xFF
#define GBIF_PWR_SEL_RMW_MASK 0xFF
#define GBIF_PWR_EN_CLR_RMW_MASK 0x10000

static void a6xx_counter_load(struct adreno_device *adreno_dev,
		struct adreno_perfcount_register *reg)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int index = reg->load_bit / 32;
	u32 enable = BIT(reg->load_bit & 31);

	/*
	 * a650 and a660 currently have the perfcounter values saved via
	 * retention in the GMU.
	 */
	if (adreno_is_a650(adreno_dev) || adreno_is_a660(adreno_dev))
		return;

	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_LOAD_VALUE_LO,
		lower_32_bits(reg->value));
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_LOAD_VALUE_HI,
		upper_32_bits(reg->value));
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_LOAD_CMD0 + index, enable);
}

/*
 * For registers that do not get restored on power cycle, read the value and
 * add the stored shadow value
 */
static u64 a6xx_counter_read_norestore(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];
	u32 hi, lo;

	kgsl_regread(device, reg->offset, &lo);
	kgsl_regread(device, reg->offset_hi, &hi);

	return ((((u64) hi) << 32) | lo) + reg->value;
}
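
/*
 * Groups flagged ADRENO_PERFCOUNTER_GROUP_RESTORE are routed through
 * a6xx_perfcounter_update() so the select value can be reprogrammed after
 * preemption and IFPC; all other groups just write the select register
 * directly.
 */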
static int a6xx_counter_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		u32 counter, u32 countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];
	int ret = 0;

	if (group->flags & ADRENO_PERFCOUNTER_GROUP_RESTORE)
		ret = a6xx_perfcounter_update(adreno_dev, reg, true);
	else
		kgsl_regwrite(device, reg->select, countable);

	if (!ret)
		reg->value = 0;

	return ret;
}
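
/*
 * With the hardware scheduler, program the select register through
 * a6xx_hwsched_counter_inline_enable() while the device is active;
 * otherwise fall back to the direct register write in a6xx_counter_enable().
 */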
static int a6xx_hwsched_counter_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		u32 counter, u32 countable)
{
	if (KGSL_DEVICE(adreno_dev)->state != KGSL_STATE_ACTIVE)
		return a6xx_counter_enable(adreno_dev, group, counter, countable);

	return a6xx_hwsched_counter_inline_enable(adreno_dev, group, counter, countable);
}
static int a6xx_counter_inline_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[0];
	u32 cmds[3];
	int ret;

	if (device->state != KGSL_STATE_ACTIVE)
		return a6xx_counter_enable(adreno_dev, group, counter,
			countable);

	if (group->flags & ADRENO_PERFCOUNTER_GROUP_RESTORE)
		a6xx_perfcounter_update(adreno_dev, reg, false);

	cmds[0] = cp_type7_packet(CP_WAIT_FOR_IDLE, 0);
	cmds[1] = cp_type4_packet(reg->select, 1);
	cmds[2] = countable;

	/* submit to highest priority RB always */
	ret = a6xx_ringbuffer_addcmds(adreno_dev, rb, NULL,
		F_NOTPROTECTED, cmds, 3, 0, NULL);
	if (ret)
		return ret;

	/*
	 * schedule dispatcher to make sure rb[0] is run, because
	 * if the current RB is not rb[0] and gpu is idle then
	 * rb[0] will not get scheduled to run
	 */
	if (adreno_dev->cur_rb != rb)
		adreno_dispatcher_schedule(device);

	/* wait for the above commands submitted to complete */
	ret = adreno_ringbuffer_waittimestamp(rb, rb->timestamp,
		ADRENO_IDLE_TIMEOUT);

	if (ret) {
		/*
		 * If we were woken up because of cancelling rb events
		 * either due to soft reset or adreno_stop, ignore the
		 * error and return 0 here. The perfcounter is already
		 * set up in software and it will be programmed in
		 * hardware when we wake up or come up after soft reset,
		 * by adreno_perfcounter_restore.
		 */
		if (ret == -EAGAIN)
			ret = 0;
		else
			dev_err(device->dev,
				"Perfcounter %s/%u/%u start via commands failed %d\n",
				group->name, counter, countable, ret);
	}

	if (!ret)
		reg->value = 0;

	return ret;
}
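
/*
 * Counter values for these groups are reloaded by a6xx_counter_load() (or
 * retained by the GMU on a650/a660) across power collapse, so this read
 * returns the raw 64-bit value; only the "norestore" variant above needs a
 * software shadow.
 */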
static u64 a6xx_counter_read(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];
	u32 hi, lo;

	kgsl_regread(device, reg->offset, &lo);
	kgsl_regread(device, reg->offset_hi, &hi);

	/* These registers are restored on power resume */
	return (((u64) hi) << 32) | lo;
}
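
/*
 * GBIF packs the countable selects for all four performance counters into a
 * single select register, one byte per counter, so the enable path below
 * uses kgsl_regrmw() with GBIF_PERF_SEL_RMW_MASK shifted by (counter * 8).
 */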
static int a6xx_counter_gbif_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];
	unsigned int shift = counter << 3;
	unsigned int perfctr_mask = 1 << counter;

	if (countable > VBIF2_PERF_CNT_SEL_MASK)
		return -EINVAL;

	/*
	 * Write 1, followed by 0 to CLR register for
	 * clearing the counter
	 */
	kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
		perfctr_mask, perfctr_mask);
	kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
		perfctr_mask, 0);
	/* select the desired countable */
	kgsl_regrmw(device, reg->select,
		GBIF_PERF_SEL_RMW_MASK << shift, countable << shift);
	/* enable counter */
	kgsl_regrmw(device, reg->select - GBIF_PERF_EN_REG_SEL_OFF,
		perfctr_mask, perfctr_mask);

	reg->value = 0;
	return 0;
}
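
/*
 * The a630 family uses the older VBIF, which has a dedicated select register
 * per counter (with fixed clear/enable offsets from it) rather than GBIF's
 * packed select, hence the plain register writes below.
 */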
static int a630_counter_vbif_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];

	if (countable > VBIF2_PERF_CNT_SEL_MASK)
		return -EINVAL;

	/*
	 * Write 1, followed by 0 to CLR register for
	 * clearing the counter
	 */
	kgsl_regwrite(device,
		reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 1);
	kgsl_regwrite(device,
		reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 0);
	kgsl_regwrite(device,
		reg->select, countable & VBIF2_PERF_CNT_SEL_MASK);
	/* enable reg is VBIF2_PERF_EN_REG_SEL_OFF dwords before the select reg */
	kgsl_regwrite(device,
		reg->select - VBIF2_PERF_EN_REG_SEL_OFF, 1);

	reg->value = 0;
	return 0;
}

static int a630_counter_vbif_pwr_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];

	/*
	 * Write 1, followed by 0 to CLR register for
	 * clearing the counter
	 */
	kgsl_regwrite(device, reg->select +
		VBIF2_PERF_PWR_CLR_REG_EN_OFF, 1);
	kgsl_regwrite(device, reg->select +
		VBIF2_PERF_PWR_CLR_REG_EN_OFF, 0);
	kgsl_regwrite(device, reg->select, 1);

	reg->value = 0;
	return 0;
}

static int a6xx_counter_gbif_pwr_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];
	unsigned int shift = counter << 3;
	unsigned int perfctr_mask = GBIF_PWR_EN_CLR_RMW_MASK << counter;

	/*
	 * Write 1, followed by 0 to CLR register for
	 * clearing the counter
	 */
	kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
		perfctr_mask, perfctr_mask);
	kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
		perfctr_mask, 0);
	/* select the desired countable */
	kgsl_regrmw(device, reg->select + GBIF_PWR_SEL_REG_EN_OFF,
		GBIF_PWR_SEL_RMW_MASK << shift, countable << shift);
	/* Enable the counter */
	kgsl_regrmw(device, reg->select, perfctr_mask, perfctr_mask);

	reg->value = 0;
	return 0;
}
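
/*
 * The CP always-on counter free-runs, so there is nothing to program on
 * enable; reads go through a6xx_read_alwayson() plus the saved shadow value.
 */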
static int a6xx_counter_alwayson_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	return 0;
}

static u64 a6xx_counter_alwayson_read(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter)
{
	struct adreno_perfcount_register *reg = &group->regs[counter];

	return a6xx_read_alwayson(adreno_dev) + reg->value;
}
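
/*
 * Read-modify-write an 8-bit countable field at the given bit offset within
 * a shared GMU counter select register.
 */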
static void a6xx_write_gmu_counter_enable(struct kgsl_device *device,
		struct adreno_perfcount_register *reg, u32 bit, u32 countable)
{
	u32 val;

	kgsl_regread(device, reg->select, &val);
	val &= ~(0xff << bit);
	val |= countable << bit;
	kgsl_regwrite(device, reg->select, val);
}

static int a6xx_counter_gmu_xoclk_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];

	if (countable > 0xff)
		return -EINVAL;

	if (counter >= 6 && !adreno_is_a660(adreno_dev))
		return -EINVAL;

	/*
	 * Counters [0:3] are in select 1 bit offsets 0, 8, 16 and 24
	 * Counters [4:5] are in select 2 bit offset 0, 8
	 * Counters [6:9] are in select 3 bit offset 0, 8, 16 and 24
	 */
	if (counter == 4 || counter == 5)
		counter -= 4;
	else if (counter >= 6)
		counter -= 6;

	a6xx_write_gmu_counter_enable(device, reg, counter * 8, countable);

	reg->value = 0;

	kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);

	return 0;
}

static int a6xx_counter_gmu_gmuclk_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];

	if (countable > 0xff)
		return -EINVAL;

	/*
	 * The two counters are stuck into GMU_CX_GMU_POWER_COUNTER_SELECT_1
	 * at bit offset 16 and 24
	 */
	a6xx_write_gmu_counter_enable(device, reg,
		16 + (counter * 8), countable);

	kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);

	reg->value = 0;

	return 0;
}

static int a6xx_counter_gmu_perf_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];

	if (countable > 0xff)
		return -EINVAL;

	/*
	 * Counters [0:3] are in select 1 bit offsets 0, 8, 16 and 24
	 * Counters [4:5] are in select 2 bit offset 0, 8
	 */
	if (counter >= 4)
		counter -= 4;

	a6xx_write_gmu_counter_enable(device, reg, counter * 8, countable);

	kgsl_regwrite(device, A6XX_GMU_CX_GMU_PERF_COUNTER_ENABLE, 1);

	reg->value = 0;

	return 0;
}
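
/*
 * Per-block counter tables. Each entry gives the LO/HI read offsets, the
 * load_bit used by a6xx_counter_load() (-1 when the counter cannot be loaded
 * through the RBBM_PERFCTR_LOAD_CMD registers), and the select register.
 */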

static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
		A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
		A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
		A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
		A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
		A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
		A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
		A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
		A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
		A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
		A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
		A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
		A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
		A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
		A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
};

static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
		A6XX_RBBM_PERFCTR_RBBM_0_HI, 14, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
		A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
		A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
		A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
		A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
		A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
		A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
		A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
		A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
		A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
		A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
		A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
		A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
		A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
		A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
		A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
		A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
		A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
		A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
		A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
};

static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
		A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
		A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
		A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
		A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
		A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
		A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
		A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
		A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
		A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
		A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
		A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
		A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
};

static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
		A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
		A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
		A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
		A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
		A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
};

static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
		A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
		A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
		A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
		A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
		A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
		A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
		A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
		A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
		A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
		A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
		A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
		A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
		A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
		A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
		A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
		A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
		A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
		A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
		A6XX_RBBM_PERFCTR_UCHE_10_HI, 69, A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
		A6XX_RBBM_PERFCTR_UCHE_11_HI, 70, A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
};

static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
		A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
		A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
		A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
		A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
		A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
		A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
		A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
		A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
		A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
		A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
		A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
		A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
};

static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
		A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
		A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
		A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
		A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
		A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
		A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
		A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
		A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
		A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
		A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
		A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
		A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
		A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
		A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
		A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
		A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
		A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
		A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
		A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
		A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
		A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
		A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
		A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
		A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
};

static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
		A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
		A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
		A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
		A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
		A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
		A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
		A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
		A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
		A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
		A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
};

static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
		A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
		A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
		A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
		A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
		A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
		A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
		A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
		A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
		A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
		A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
		A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
		A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
};

static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
		A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
		A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
};

static struct adreno_perfcount_register a6xx_perfcounters_gbif[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW0,
		A6XX_GBIF_PERF_CNT_HIGH0, -1, A6XX_GBIF_PERF_CNT_SEL },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW1,
		A6XX_GBIF_PERF_CNT_HIGH1, -1, A6XX_GBIF_PERF_CNT_SEL },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW2,
		A6XX_GBIF_PERF_CNT_HIGH2, -1, A6XX_GBIF_PERF_CNT_SEL },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW3,
		A6XX_GBIF_PERF_CNT_HIGH3, -1, A6XX_GBIF_PERF_CNT_SEL },
};

static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW0,
		A6XX_GBIF_PWR_CNT_HIGH0, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW1,
		A6XX_GBIF_PWR_CNT_HIGH1, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW2,
		A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
};
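
/*
 * GMU counters are not part of the RBBM load path, so they have no load_bit
 * and are read through a6xx_counter_read_norestore().
 */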

#define GMU_COUNTER(lo, hi, sel) \
	{ .countable = KGSL_PERFCOUNTER_NOT_USED, \
		.offset = lo, .offset_hi = hi, .select = sel }

#define GMU_COUNTER_RESERVED(lo, hi, sel) \
	{ .countable = KGSL_PERFCOUNTER_BROKEN, \
		.offset = lo, .offset_hi = hi, .select = sel }

static struct adreno_perfcount_register a6xx_perfcounters_gmu_xoclk[] = {
	/*
	 * COUNTER_XOCLK_0 and COUNTER_XOCLK_4 are used for the GPU
	 * busy and ifpc count. Mark them as reserved to ensure they
	 * are not re-used.
	 */
	GMU_COUNTER_RESERVED(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0),
	GMU_COUNTER_RESERVED(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_6_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_6_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_2),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_7_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_7_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_2),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_8_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_8_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_2),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_9_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_9_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_2),
};

static struct adreno_perfcount_register a6xx_perfcounters_gmu_gmuclk[] = {
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_GMUCLK_0_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_GMUCLK_0_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1),
	GMU_COUNTER(A6XX_GMU_CX_GMU_POWER_COUNTER_GMUCLK_1_L,
		A6XX_GMU_CX_GMU_POWER_COUNTER_GMUCLK_1_H,
		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1),
};

static struct adreno_perfcount_register a6xx_perfcounters_gmu_perf[] = {
	GMU_COUNTER(A6XX_GMU_CX_GMU_PERF_COUNTER_0_L,
		A6XX_GMU_CX_GMU_PERF_COUNTER_0_H,
		A6XX_GMU_CX_GMU_PERF_COUNTER_SELECT_0),
	GMU_COUNTER(A6XX_GMU_CX_GMU_PERF_COUNTER_1_L,
		A6XX_GMU_CX_GMU_PERF_COUNTER_1_H,
		A6XX_GMU_CX_GMU_PERF_COUNTER_SELECT_0),
	GMU_COUNTER(A6XX_GMU_CX_GMU_PERF_COUNTER_2_L,
		A6XX_GMU_CX_GMU_PERF_COUNTER_2_H,
		A6XX_GMU_CX_GMU_PERF_COUNTER_SELECT_0),
	GMU_COUNTER(A6XX_GMU_CX_GMU_PERF_COUNTER_3_L,
		A6XX_GMU_CX_GMU_PERF_COUNTER_3_H,
		A6XX_GMU_CX_GMU_PERF_COUNTER_SELECT_0),
	GMU_COUNTER(A6XX_GMU_CX_GMU_PERF_COUNTER_4_L,
		A6XX_GMU_CX_GMU_PERF_COUNTER_4_H,
		A6XX_GMU_CX_GMU_PERF_COUNTER_SELECT_1),
	GMU_COUNTER(A6XX_GMU_CX_GMU_PERF_COUNTER_5_L,
		A6XX_GMU_CX_GMU_PERF_COUNTER_5_H,
		A6XX_GMU_CX_GMU_PERF_COUNTER_SELECT_1),
};

static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
};

/*
 * The ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
 * because most of the perfcounter groups need to be restored
 * as part of preemption and IFPC. Perfcounter groups that are
 * not restored as part of preemption and IFPC should be defined
 * using the A6XX_PERFCOUNTER_GROUP_FLAGS macro.
 */
#define A6XX_PERFCOUNTER_GROUP(offset, name, enable, read, load) \
	ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, \
	ADRENO_PERFCOUNTER_GROUP_RESTORE, enable, read, load)

#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags, enable, read, load) \
	ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags, enable, \
		read, load)

#define A6XX_REGULAR_PERFCOUNTER_GROUP(offset, name) \
	A6XX_PERFCOUNTER_GROUP(offset, name, \
		a6xx_counter_enable, a6xx_counter_read, a6xx_counter_load)
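
/*
 * The group tables below differ mainly in the bus counters they expose
 * (VBIF on a630, GBIF elsewhere), whether the GMU counter groups are
 * available, and how the SP/TP/HLSQ selects are programmed (inline via the
 * ringbuffer, or through the hwsched path).
 */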
static const struct adreno_perfcount_group a630_perfcounter_groups
		[KGSL_PERFCOUNTER_GROUP_MAX] = {
	A6XX_REGULAR_PERFCOUNTER_GROUP(CP, cp),
	A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0,
		a6xx_counter_enable, a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(PC, pc),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VFD, vfd),
	A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VPC, vpc),
	A6XX_REGULAR_PERFCOUNTER_GROUP(CCU, ccu),
	A6XX_REGULAR_PERFCOUNTER_GROUP(CMP, cmp),
	A6XX_REGULAR_PERFCOUNTER_GROUP(TSE, tse),
	A6XX_REGULAR_PERFCOUNTER_GROUP(RAS, ras),
	A6XX_REGULAR_PERFCOUNTER_GROUP(LRZ, lrz),
	A6XX_REGULAR_PERFCOUNTER_GROUP(UCHE, uche),
	A6XX_PERFCOUNTER_GROUP(TP, tp, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_PERFCOUNTER_GROUP(SP, sp, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(RB, rb),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VSC, vsc),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0,
		a630_counter_vbif_enable, a6xx_counter_read_norestore, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
		ADRENO_PERFCOUNTER_GROUP_FIXED, a630_counter_vbif_pwr_enable,
		a6xx_counter_read_norestore, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
		ADRENO_PERFCOUNTER_GROUP_FIXED,
		a6xx_counter_alwayson_enable, a6xx_counter_alwayson_read, NULL),
};

static const struct adreno_perfcount_group
		a6xx_legacy_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_MAX] = {
	A6XX_REGULAR_PERFCOUNTER_GROUP(CP, cp),
	A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0,
		a6xx_counter_enable, a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(PC, pc),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VFD, vfd),
	A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VPC, vpc),
	A6XX_REGULAR_PERFCOUNTER_GROUP(CCU, ccu),
	A6XX_REGULAR_PERFCOUNTER_GROUP(CMP, cmp),
	A6XX_REGULAR_PERFCOUNTER_GROUP(TSE, tse),
	A6XX_REGULAR_PERFCOUNTER_GROUP(RAS, ras),
	A6XX_REGULAR_PERFCOUNTER_GROUP(LRZ, lrz),
	A6XX_REGULAR_PERFCOUNTER_GROUP(UCHE, uche),
	A6XX_PERFCOUNTER_GROUP(TP, tp, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_PERFCOUNTER_GROUP(SP, sp, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(RB, rb),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VSC, vsc),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, gbif, 0,
		a6xx_counter_gbif_enable, a6xx_counter_read_norestore, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, gbif_pwr,
		ADRENO_PERFCOUNTER_GROUP_FIXED, a6xx_counter_gbif_pwr_enable,
		a6xx_counter_read_norestore, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
		ADRENO_PERFCOUNTER_GROUP_FIXED,
		a6xx_counter_alwayson_enable, a6xx_counter_alwayson_read, NULL),
};

static const struct adreno_perfcount_group a6xx_perfcounter_groups
		[KGSL_PERFCOUNTER_GROUP_MAX] = {
	A6XX_REGULAR_PERFCOUNTER_GROUP(CP, cp),
	A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0,
		a6xx_counter_enable, a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(PC, pc),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VFD, vfd),
	A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VPC, vpc),
	A6XX_REGULAR_PERFCOUNTER_GROUP(CCU, ccu),
	A6XX_REGULAR_PERFCOUNTER_GROUP(CMP, cmp),
	A6XX_REGULAR_PERFCOUNTER_GROUP(TSE, tse),
	A6XX_REGULAR_PERFCOUNTER_GROUP(RAS, ras),
	A6XX_REGULAR_PERFCOUNTER_GROUP(LRZ, lrz),
	A6XX_REGULAR_PERFCOUNTER_GROUP(UCHE, uche),
	A6XX_PERFCOUNTER_GROUP(TP, tp, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_PERFCOUNTER_GROUP(SP, sp, a6xx_counter_inline_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(RB, rb),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VSC, vsc),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, gbif, 0,
		a6xx_counter_gbif_enable, a6xx_counter_read_norestore, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, gbif_pwr,
		ADRENO_PERFCOUNTER_GROUP_FIXED, a6xx_counter_gbif_pwr_enable,
		a6xx_counter_read_norestore, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
		ADRENO_PERFCOUNTER_GROUP_FIXED,
		a6xx_counter_alwayson_enable, a6xx_counter_alwayson_read, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(GMU_XOCLK, gmu_xoclk, 0,
		a6xx_counter_gmu_xoclk_enable, a6xx_counter_read_norestore,
		NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(GMU_GMUCLK, gmu_gmuclk, 0,
		a6xx_counter_gmu_gmuclk_enable, a6xx_counter_read_norestore,
		NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(GMU_PERF, gmu_perf, 0,
		a6xx_counter_gmu_perf_enable, a6xx_counter_read_norestore,
		NULL),
};

static const struct adreno_perfcount_group a6xx_hwsched_perfcounter_groups
		[KGSL_PERFCOUNTER_GROUP_MAX] = {
	A6XX_REGULAR_PERFCOUNTER_GROUP(CP, cp),
	A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0,
		a6xx_counter_enable, a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(PC, pc),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VFD, vfd),
	A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq, a6xx_hwsched_counter_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VPC, vpc),
	A6XX_REGULAR_PERFCOUNTER_GROUP(CCU, ccu),
	A6XX_REGULAR_PERFCOUNTER_GROUP(CMP, cmp),
	A6XX_REGULAR_PERFCOUNTER_GROUP(TSE, tse),
	A6XX_REGULAR_PERFCOUNTER_GROUP(RAS, ras),
	A6XX_REGULAR_PERFCOUNTER_GROUP(LRZ, lrz),
	A6XX_REGULAR_PERFCOUNTER_GROUP(UCHE, uche),
	A6XX_PERFCOUNTER_GROUP(TP, tp, a6xx_hwsched_counter_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_PERFCOUNTER_GROUP(SP, sp, a6xx_hwsched_counter_enable,
		a6xx_counter_read, a6xx_counter_load),
	A6XX_REGULAR_PERFCOUNTER_GROUP(RB, rb),
	A6XX_REGULAR_PERFCOUNTER_GROUP(VSC, vsc),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, gbif, 0,
		a6xx_counter_gbif_enable, a6xx_counter_read_norestore, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, gbif_pwr,
		ADRENO_PERFCOUNTER_GROUP_FIXED, a6xx_counter_gbif_pwr_enable,
		a6xx_counter_read_norestore, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
		ADRENO_PERFCOUNTER_GROUP_FIXED,
		a6xx_counter_alwayson_enable, a6xx_counter_alwayson_read, NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(GMU_XOCLK, gmu_xoclk, 0,
		a6xx_counter_gmu_xoclk_enable, a6xx_counter_read_norestore,
		NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(GMU_GMUCLK, gmu_gmuclk, 0,
		a6xx_counter_gmu_gmuclk_enable, a6xx_counter_read_norestore,
		NULL),
	A6XX_PERFCOUNTER_GROUP_FLAGS(GMU_PERF, gmu_perf, 0,
		a6xx_counter_gmu_perf_enable, a6xx_counter_read_norestore,
		NULL),
};

/*
 * a610, a612, a616, a618 and a619 do not have the GMU registers.
 * a605, a608, a615, a630, a640 and a680 don't have enough room in the
 * CP_PROTECT registers, so the GMU counters are not accessible.
 */
const struct adreno_perfcounters adreno_a6xx_legacy_perfcounters = {
	a6xx_legacy_perfcounter_groups,
	ARRAY_SIZE(a6xx_legacy_perfcounter_groups),
};

const struct adreno_perfcounters adreno_a630_perfcounters = {
	a630_perfcounter_groups,
	ARRAY_SIZE(a630_perfcounter_groups),
};

const struct adreno_perfcounters adreno_a6xx_perfcounters = {
	a6xx_perfcounter_groups,
	ARRAY_SIZE(a6xx_perfcounter_groups),
};

const struct adreno_perfcounters adreno_a6xx_hwsched_perfcounters = {
	a6xx_hwsched_perfcounter_groups,
	ARRAY_SIZE(a6xx_hwsched_perfcounter_groups),
};