// adreno_a6xx_gmu_snapshot.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include "a6xx_reg.h"
  7. #include "adreno.h"
  8. #include "adreno_a6xx.h"
  9. #include "adreno_a6xx_gmu.h"
  10. #include "adreno_snapshot.h"
  11. #include "kgsl_device.h"
/*
 * {start, end} pairs (inclusive) of GMU register offsets that live in the GX
 * power domain; dumped only after confirming GX is on (see
 * a6xx_gmu_device_snapshot), since reading them with GX collapsed would fault.
 */
static const unsigned int a6xx_gmu_gx_registers[] = {
	/* GMU GX */
	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
};
/*
 * {start, end} pairs covering the GMU ITCM and DTCM as register ranges; used
 * only for legacy targets (a630/a615 family) that snapshot the TCMs this way.
 */
static const unsigned int a6xx_gmu_tcm_registers[] = {
	/* ITCM */
	0x1B400, 0x1C3FF,
	/* DTCM */
	0x1C400, 0x1D3FF,
};
/*
 * {start, end} pairs of always-on GMU register ranges (CX and AO domains);
 * dumped unconditionally for every a6xx target.
 */
static const unsigned int a6xx_gmu_registers[] = {
	/* GMU CX */
	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA01,
	/* GMU AO */
	0x23B00, 0x23B16,
};
/*
 * Additional {start, end} GMU register ranges that exist only on A660-class
 * parts; dumped on top of a6xx_gmu_registers when adreno_is_a660() is true.
 */
static const unsigned int a660_gmu_registers[] = {
	/* GMU CX */
	0x1F408, 0x1F40D, 0x1F40F, 0x1F40F, 0x1F50B, 0x1F50B, 0x1F860, 0x1F860,
	0x1F870, 0x1F877, 0x1F8C4, 0x1F8C4, 0x1F8F0, 0x1F8F1, 0x1F948, 0x1F94A,
	0x1F966, 0x1F96B, 0x1F970, 0x1F970, 0x1F972, 0x1F979, 0x1F9CD, 0x1F9D4,
	0x1FA02, 0x1FA03, 0x20000, 0x20001, 0x20004, 0x20004, 0x20008, 0x20012,
	0x20018, 0x20018,
	/* GMU AO LPAC */
	0x23B30, 0x23B30,
};
/*
 * {start, end} pairs of GPU clock-controller register ranges for generic a6xx
 * targets (a662/a621 and a663 use their own tables below).
 */
static const unsigned int a6xx_gmu_gpucc_registers[] = {
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
	/* GPU CC ACD */
	0x26400, 0x26416, 0x26420, 0x26427,
};
/*
 * GPU clock-controller {start, end} ranges specific to a662/a621 targets
 * (selected in a6xx_gmu_device_snapshot).
 */
static const unsigned int a662_gmu_gpucc_registers[] = {
	/* GPU CC */
	0x24000, 0x2400e, 0x24400, 0x2440e, 0x24800, 0x24805, 0x24c00, 0x24cff,
	0x25800, 0x25804, 0x25c00, 0x25c04, 0x26000, 0x26004, 0x26400, 0x26405,
	0x26414, 0x2641d, 0x2642a, 0x26430, 0x26432, 0x26432, 0x26441, 0x26455,
	0x26466, 0x26468, 0x26478, 0x2647a, 0x26489, 0x2648a, 0x2649c, 0x2649e,
	0x264a0, 0x264a3, 0x264b3, 0x264b5, 0x264c5, 0x264c7, 0x264d6, 0x264d8,
	0x264e8, 0x264e9, 0x264f9, 0x264fc, 0x2650b, 0x2650c, 0x2651c, 0x2651e,
	0x26540, 0x26570, 0x26600, 0x26616, 0x26620, 0x2662d,
};
/*
 * GPU clock-controller {start, end} ranges specific to a663 targets; a subset
 * of the a662 table without the 0x24800/0x24c00 blocks.
 */
static const unsigned int a663_gmu_gpucc_registers[] = {
	/* GPU CC */
	0x24000, 0x2400e, 0x24400, 0x2440e, 0x25800, 0x25804, 0x25c00, 0x25c04,
	0x26000, 0x26004, 0x26400, 0x26405, 0x26414, 0x2641d, 0x2642a, 0x26430,
	0x26432, 0x26432, 0x26441, 0x26455, 0x26466, 0x26468, 0x26478, 0x2647a,
	0x26489, 0x2648a, 0x2649c, 0x2649e, 0x264a0, 0x264a3, 0x264b3, 0x264b5,
	0x264c5, 0x264c7, 0x264d6, 0x264d8, 0x264e8, 0x264e9, 0x264f9, 0x264fc,
	0x2650b, 0x2650c, 0x2651c, 0x2651e, 0x26540, 0x26570, 0x26600, 0x26616,
	0x26620, 0x2662d,
};
/*
 * RSCC {start, end} register ranges for a630/a615-family targets, dumped via
 * the normal GPU register path (adreno_snapshot_registers).
 */
static const unsigned int a630_rscc_snapshot_registers[] = {
	0x23400, 0x23434, 0x23436, 0x23436, 0x23480, 0x23484, 0x23489, 0x2348C,
	0x23491, 0x23494, 0x23499, 0x2349C, 0x234A1, 0x234A4, 0x234A9, 0x234AC,
	0x23500, 0x23502, 0x23504, 0x23507, 0x23514, 0x23519, 0x23524, 0x2352B,
	0x23580, 0x23597, 0x23740, 0x23741, 0x23744, 0x23747, 0x2374C, 0x23787,
	0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897, 0x2389C, 0x238D7,
	0x2393C, 0x2393F, 0x23944, 0x2397F,
};
/*
 * RSCC {start, end} register ranges for a640/a680 targets, dumped via the
 * normal GPU register path (adreno_snapshot_registers).
 */
static const unsigned int a6xx_rscc_snapshot_registers[] = {
	0x23400, 0x23434, 0x23436, 0x23436, 0x23440, 0x23440, 0x23480, 0x23484,
	0x23489, 0x2348C, 0x23491, 0x23494, 0x23499, 0x2349C, 0x234A1, 0x234A4,
	0x234A9, 0x234AC, 0x23500, 0x23502, 0x23504, 0x23507, 0x23514, 0x23519,
	0x23524, 0x2352B, 0x23580, 0x23597, 0x23740, 0x23741, 0x23744, 0x23747,
	0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
	0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
};
/*
 * RSCC {start, end} register ranges for a650-family targets. Unlike the
 * tables above, these are read through the dedicated rscc_virt mapping in
 * a6xx_snapshot_rscc_registers (offsets are rebased by RSCC_OFFSET_DWORDS).
 */
static const unsigned int a650_rscc_registers[] = {
	0x38000, 0x38034, 0x38036, 0x38036, 0x38040, 0x38042, 0x38080, 0x38084,
	0x38089, 0x3808C, 0x38091, 0x38094, 0x38099, 0x3809C, 0x380A1, 0x380A4,
	0x380A9, 0x380AC, 0x38100, 0x38102, 0x38104, 0x38107, 0x38114, 0x38119,
	0x38124, 0x3812E, 0x38180, 0x38197, 0x38340, 0x38341, 0x38344, 0x38347,
	0x3834C, 0x3834F, 0x38351, 0x38354, 0x38356, 0x38359, 0x3835B, 0x3835E,
	0x38360, 0x38363, 0x38365, 0x38368, 0x3836A, 0x3836D, 0x3836F, 0x38372,
	0x383EC, 0x383EF, 0x383F4, 0x383F7, 0x383F9, 0x383FC, 0x383FE, 0x38401,
	0x38403, 0x38406, 0x38408, 0x3840B, 0x3840D, 0x38410, 0x38412, 0x38415,
	0x38417, 0x3841A, 0x38494, 0x38497, 0x3849C, 0x3849F, 0x384A1, 0x384A4,
	0x384A6, 0x384A9, 0x384AB, 0x384AE, 0x384B0, 0x384B3, 0x384B5, 0x384B8,
	0x384BA, 0x384BD, 0x384BF, 0x384C2, 0x3853C, 0x3853F, 0x38544, 0x38547,
	0x38549, 0x3854C, 0x3854E, 0x38551, 0x38553, 0x38556, 0x38558, 0x3855B,
	0x3855D, 0x38560, 0x38562, 0x38565, 0x38567, 0x3856A, 0x385E4, 0x385E7,
	0x385EC, 0x385EF, 0x385F1, 0x385F4, 0x385F6, 0x385F9, 0x385FB, 0x385FE,
	0x38600, 0x38603, 0x38605, 0x38608, 0x3860A, 0x3860D, 0x3860F, 0x38612,
	0x3868C, 0x3868F, 0x38694, 0x38697, 0x38699, 0x3869C, 0x3869E, 0x386A1,
	0x386A3, 0x386A6, 0x386A8, 0x386AB, 0x386AD, 0x386B0, 0x386B2, 0x386B5,
	0x386B7, 0x386BA, 0x38734, 0x38737, 0x3873C, 0x3873F, 0x38741, 0x38744,
	0x38746, 0x38749, 0x3874B, 0x3874E, 0x38750, 0x38753, 0x38755, 0x38758,
	0x3875A, 0x3875D, 0x3875F, 0x38762, 0x387DC, 0x387DF, 0x387E4, 0x387E7,
	0x387E9, 0x387EC, 0x387EE, 0x387F1, 0x387F3, 0x387F6, 0x387F8, 0x387FB,
	0x387FD, 0x38800, 0x38802, 0x38805, 0x38807, 0x3880A, 0x38884, 0x38887,
	0x3888C, 0x3888F, 0x38891, 0x38894, 0x38896, 0x38899, 0x3889B, 0x3889E,
	0x388A0, 0x388A3, 0x388A5, 0x388A8, 0x388AA, 0x388AD, 0x388AF, 0x388B2,
	0x3892C, 0x3892F, 0x38934, 0x38937, 0x38939, 0x3893C, 0x3893E, 0x38941,
	0x38943, 0x38946, 0x38948, 0x3894B, 0x3894D, 0x38950, 0x38952, 0x38955,
	0x38957, 0x3895A, 0x38B50, 0x38B51, 0x38B53, 0x38B55, 0x38B5A, 0x38B5A,
	0x38B5F, 0x38B5F, 0x38B64, 0x38B64, 0x38B69, 0x38B69, 0x38B6E, 0x38B6E,
	0x38B73, 0x38B73, 0x38BF8, 0x38BF8, 0x38BFD, 0x38BFD, 0x38C02, 0x38C02,
	0x38C07, 0x38C07, 0x38C0C, 0x38C0C, 0x38C11, 0x38C11, 0x38C16, 0x38C16,
	0x38C1B, 0x38C1B, 0x38CA0, 0x38CA0, 0x38CA5, 0x38CA5, 0x38CAA, 0x38CAA,
	0x38CAF, 0x38CAF, 0x38CB4, 0x38CB4, 0x38CB9, 0x38CB9, 0x38CBE, 0x38CBE,
	0x38CC3, 0x38CC3, 0x38D48, 0x38D48, 0x38D4D, 0x38D4D, 0x38D52, 0x38D52,
	0x38D57, 0x38D57, 0x38D5C, 0x38D5C, 0x38D61, 0x38D61, 0x38D66, 0x38D66,
	0x38D6B, 0x38D6B, 0x38DF0, 0x38DF0, 0x38DF5, 0x38DF5, 0x38DFA, 0x38DFA,
	0x38DFF, 0x38DFF, 0x38E04, 0x38E04, 0x38E09, 0x38E09, 0x38E0E, 0x38E0E,
	0x38E13, 0x38E13, 0x38E98, 0x38E98, 0x38E9D, 0x38E9D, 0x38EA2, 0x38EA2,
	0x38EA7, 0x38EA7, 0x38EAC, 0x38EAC, 0x38EB1, 0x38EB1, 0x38EB6, 0x38EB6,
	0x38EBB, 0x38EBB, 0x38F40, 0x38F40, 0x38F45, 0x38F45, 0x38F4A, 0x38F4A,
	0x38F4F, 0x38F4F, 0x38F54, 0x38F54, 0x38F59, 0x38F59, 0x38F5E, 0x38F5E,
	0x38F63, 0x38F63, 0x38FE8, 0x38FE8, 0x38FED, 0x38FED, 0x38FF2, 0x38FF2,
	0x38FF7, 0x38FF7, 0x38FFC, 0x38FFC, 0x39001, 0x39001, 0x39006, 0x39006,
	0x3900B, 0x3900B, 0x39090, 0x39090, 0x39095, 0x39095, 0x3909A, 0x3909A,
	0x3909F, 0x3909F, 0x390A4, 0x390A4, 0x390A9, 0x390A9, 0x390AE, 0x390AE,
	0x390B3, 0x390B3, 0x39138, 0x39138, 0x3913D, 0x3913D, 0x39142, 0x39142,
	0x39147, 0x39147, 0x3914C, 0x3914C, 0x39151, 0x39151, 0x39156, 0x39156,
	0x3915B, 0x3915B,
};
  145. static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device,
  146. u8 *buf, size_t remain, void *priv)
  147. {
  148. struct kgsl_snapshot_gmu_mem *mem_hdr =
  149. (struct kgsl_snapshot_gmu_mem *)buf;
  150. unsigned int *data = (unsigned int *)
  151. (buf + sizeof(*mem_hdr));
  152. struct gmu_mem_type_desc *desc = priv;
  153. if (priv == NULL || desc->memdesc->hostptr == NULL)
  154. return 0;
  155. if (remain < desc->memdesc->size + sizeof(*mem_hdr)) {
  156. dev_err(device->dev,
  157. "snapshot: Not enough memory for the gmu section %d\n",
  158. desc->type);
  159. return 0;
  160. }
  161. memset(mem_hdr, 0, sizeof(*mem_hdr));
  162. mem_hdr->type = desc->type;
  163. mem_hdr->hostaddr = (uintptr_t)desc->memdesc->hostptr;
  164. mem_hdr->gmuaddr = desc->memdesc->gmuaddr;
  165. mem_hdr->gpuaddr = 0;
  166. /* Just copy the ringbuffer, there are no active IBs */
  167. memcpy(data, desc->memdesc->hostptr, desc->memdesc->size);
  168. return desc->memdesc->size + sizeof(*mem_hdr);
  169. }
  170. static size_t a6xx_gmu_snapshot_dtcm(struct kgsl_device *device,
  171. u8 *buf, size_t remain, void *priv)
  172. {
  173. struct kgsl_snapshot_gmu_mem *mem_hdr =
  174. (struct kgsl_snapshot_gmu_mem *)buf;
  175. struct a6xx_gmu_device *gmu = (struct a6xx_gmu_device *)priv;
  176. u32 *data = (u32 *)(buf + sizeof(*mem_hdr));
  177. u32 i;
  178. if (remain < gmu->vma[GMU_DTCM].size + sizeof(*mem_hdr)) {
  179. SNAPSHOT_ERR_NOMEM(device, "GMU DTCM Memory");
  180. return 0;
  181. }
  182. mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
  183. mem_hdr->hostaddr = 0;
  184. mem_hdr->gmuaddr = gmu->vma[GMU_DTCM].start;
  185. mem_hdr->gpuaddr = 0;
  186. /* FIXME: use a bulk read? */
  187. for (i = 0; i < (gmu->vma[GMU_DTCM].size >> 2); i++)
  188. gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + i, data++);
  189. return gmu->vma[GMU_DTCM].size + sizeof(*mem_hdr);
  190. }
  191. static size_t a6xx_gmu_snapshot_itcm(struct kgsl_device *device,
  192. u8 *buf, size_t remain, void *priv)
  193. {
  194. struct kgsl_snapshot_gmu_mem *mem_hdr =
  195. (struct kgsl_snapshot_gmu_mem *)buf;
  196. void *dest = buf + sizeof(*mem_hdr);
  197. struct a6xx_gmu_device *gmu = (struct a6xx_gmu_device *)priv;
  198. if (!gmu->itcm_shadow) {
  199. dev_err(&gmu->pdev->dev, "ITCM not captured\n");
  200. return 0;
  201. }
  202. if (remain < gmu->vma[GMU_ITCM].size + sizeof(*mem_hdr)) {
  203. SNAPSHOT_ERR_NOMEM(device, "GMU ITCM Memory");
  204. return 0;
  205. }
  206. mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
  207. mem_hdr->hostaddr = 0;
  208. mem_hdr->gmuaddr = gmu->vma[GMU_ITCM].start;
  209. mem_hdr->gpuaddr = 0;
  210. memcpy(dest, gmu->itcm_shadow, gmu->vma[GMU_ITCM].size);
  211. return gmu->vma[GMU_ITCM].size + sizeof(*mem_hdr);
  212. }
  213. static void a6xx_gmu_snapshot_memories(struct kgsl_device *device,
  214. struct a6xx_gmu_device *gmu, struct kgsl_snapshot *snapshot)
  215. {
  216. struct gmu_mem_type_desc desc;
  217. struct kgsl_memdesc *md;
  218. int i;
  219. for (i = 0; i < ARRAY_SIZE(gmu->gmu_globals); i++) {
  220. md = &gmu->gmu_globals[i];
  221. if (!md->size)
  222. continue;
  223. desc.memdesc = md;
  224. if (md == gmu->hfi.hfi_mem)
  225. desc.type = SNAPSHOT_GMU_MEM_HFI;
  226. else if (md == gmu->gmu_log)
  227. desc.type = SNAPSHOT_GMU_MEM_LOG;
  228. else if (md == gmu->dump_mem)
  229. desc.type = SNAPSHOT_GMU_MEM_DEBUG;
  230. else if (md == gmu->vrb)
  231. desc.type = SNAPSHOT_GMU_MEM_VRB;
  232. else if (md == gmu->trace.md)
  233. desc.type = SNAPSHOT_GMU_MEM_TRACE;
  234. else
  235. desc.type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
  236. kgsl_snapshot_add_section(device,
  237. KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
  238. snapshot, a6xx_snapshot_gmu_mem, &desc);
  239. }
  240. }
/* One GMU firmware version entry: a SNAPSHOT_DEBUG_* tag and its raw value */
struct kgsl_snapshot_gmu_version {
	/* SNAPSHOT_DEBUG_GMU_*_VERSION identifier for this entry */
	uint32_t type;
	/* Raw version word reported by the GMU */
	uint32_t value;
};
  245. static size_t a6xx_snapshot_gmu_version(struct kgsl_device *device,
  246. u8 *buf, size_t remain, void *priv)
  247. {
  248. struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
  249. uint32_t *data = (uint32_t *) (buf + sizeof(*header));
  250. struct kgsl_snapshot_gmu_version *ver = priv;
  251. if (remain < DEBUG_SECTION_SZ(1)) {
  252. SNAPSHOT_ERR_NOMEM(device, "GMU Version");
  253. return 0;
  254. }
  255. header->type = ver->type;
  256. header->size = 1;
  257. *data = ver->value;
  258. return DEBUG_SECTION_SZ(1);
  259. }
  260. static void a6xx_gmu_snapshot_versions(struct kgsl_device *device,
  261. struct a6xx_gmu_device *gmu,
  262. struct kgsl_snapshot *snapshot)
  263. {
  264. int i;
  265. struct kgsl_snapshot_gmu_version gmu_vers[] = {
  266. { .type = SNAPSHOT_DEBUG_GMU_CORE_VERSION,
  267. .value = gmu->ver.core, },
  268. { .type = SNAPSHOT_DEBUG_GMU_CORE_DEV_VERSION,
  269. .value = gmu->ver.core_dev, },
  270. { .type = SNAPSHOT_DEBUG_GMU_PWR_VERSION,
  271. .value = gmu->ver.pwr, },
  272. { .type = SNAPSHOT_DEBUG_GMU_PWR_DEV_VERSION,
  273. .value = gmu->ver.pwr_dev, },
  274. { .type = SNAPSHOT_DEBUG_GMU_HFI_VERSION,
  275. .value = gmu->ver.hfi, },
  276. };
  277. for (i = 0; i < ARRAY_SIZE(gmu_vers); i++)
  278. kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
  279. snapshot, a6xx_snapshot_gmu_version,
  280. &gmu_vers[i]);
  281. }
  282. #define RSCC_OFFSET_DWORDS 0x38000
  283. static size_t a6xx_snapshot_rscc_registers(struct kgsl_device *device, u8 *buf,
  284. size_t remain, void *priv)
  285. {
  286. struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
  287. struct kgsl_snapshot_registers *regs = priv;
  288. unsigned int *data = (unsigned int *)(buf + sizeof(*header));
  289. int count = 0, j, k;
  290. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  291. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  292. /* Figure out how many registers we are going to dump */
  293. for (j = 0; j < regs->count; j++) {
  294. int start = regs->regs[j * 2];
  295. int end = regs->regs[j * 2 + 1];
  296. count += (end - start + 1);
  297. }
  298. if (remain < (count * 8) + sizeof(*header)) {
  299. SNAPSHOT_ERR_NOMEM(device, "RSCC REGISTERS");
  300. return 0;
  301. }
  302. for (j = 0; j < regs->count; j++) {
  303. unsigned int start = regs->regs[j * 2];
  304. unsigned int end = regs->regs[j * 2 + 1];
  305. for (k = start; k <= end; k++) {
  306. unsigned int val;
  307. val = __raw_readl(gmu->rscc_virt +
  308. ((k - RSCC_OFFSET_DWORDS) << 2));
  309. *data++ = k;
  310. *data++ = val;
  311. }
  312. }
  313. header->count = count;
  314. /* Return the size of the section */
  315. return (count * 8) + sizeof(*header);
  316. }
  317. static void snapshot_rscc_registers(struct adreno_device *adreno_dev,
  318. struct kgsl_snapshot *snapshot)
  319. {
  320. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  321. /* RSCC registers are on cx */
  322. if (adreno_is_a650_family(adreno_dev)) {
  323. struct kgsl_snapshot_registers r;
  324. r.regs = a650_rscc_registers;
  325. r.count = ARRAY_SIZE(a650_rscc_registers) / 2;
  326. kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
  327. snapshot, a6xx_snapshot_rscc_registers, &r);
  328. } else if (adreno_is_a615_family(adreno_dev) ||
  329. adreno_is_a630(adreno_dev)) {
  330. adreno_snapshot_registers(device, snapshot,
  331. a630_rscc_snapshot_registers,
  332. ARRAY_SIZE(a630_rscc_snapshot_registers) / 2);
  333. } else if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) {
  334. adreno_snapshot_registers(device, snapshot,
  335. a6xx_rscc_snapshot_registers,
  336. ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
  337. }
  338. }
/*
 * a6xx_gmu_device_snapshot() - A6XX GMU snapshot function
 * @device: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
 * into the snapshot memory.
 *
 * Ordering matters: ITCM, versions, CX/AO registers and memories are always
 * safe to dump. GX-domain registers are dumped only when GX power is on, and
 * DTCM is skipped when the SMMU is stalled on the GX-on path (a stalled SMMU
 * can cause NoC timeouts on host DTCM access).
 */
void a6xx_gmu_device_snapshot(struct kgsl_device *device,
	struct kgsl_snapshot *snapshot)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);

	/* ITCM comes from the host shadow, so it is always dumpable */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
		snapshot, a6xx_gmu_snapshot_itcm, gmu);

	a6xx_gmu_snapshot_versions(device, gmu, snapshot);

	a6xx_gmu_snapshot_memories(device, gmu, snapshot);

	/* Snapshot tcms as registers for legacy targets */
	if (adreno_is_a630(adreno_dev) ||
			adreno_is_a615_family(adreno_dev))
		adreno_snapshot_registers(device, snapshot,
			a6xx_gmu_tcm_registers,
			ARRAY_SIZE(a6xx_gmu_tcm_registers) / 2);

	/* Always-on GMU CX/AO ranges */
	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
		ARRAY_SIZE(a6xx_gmu_registers) / 2);

	/* Pick the GPU clock-controller table that matches the target */
	if (adreno_is_a662(adreno_dev) || adreno_is_a621(adreno_dev))
		adreno_snapshot_registers(device, snapshot,
			a662_gmu_gpucc_registers,
			ARRAY_SIZE(a662_gmu_gpucc_registers) / 2);
	else if (adreno_is_a663(adreno_dev))
		adreno_snapshot_registers(device, snapshot,
			a663_gmu_gpucc_registers,
			ARRAY_SIZE(a663_gmu_gpucc_registers) / 2);
	else
		adreno_snapshot_registers(device, snapshot,
			a6xx_gmu_gpucc_registers,
			ARRAY_SIZE(a6xx_gmu_gpucc_registers) / 2);

	/* Snapshot A660 specific GMU registers */
	if (adreno_is_a660(adreno_dev))
		adreno_snapshot_registers(device, snapshot, a660_gmu_registers,
			ARRAY_SIZE(a660_gmu_registers) / 2);

	snapshot_rscc_registers(adreno_dev, snapshot);

	/* GX is off: skip GX registers but still dump DTCM below */
	if (!a6xx_gmu_gx_is_on(adreno_dev))
		goto dtcm;

	/* Set fence to ALLOW mode so registers can be read */
	kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);

	/* Make sure the previous write posted before reading */
	wmb();

	adreno_snapshot_registers(device, snapshot,
		a6xx_gmu_gx_registers,
		ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);

	/* A stalled SMMU can lead to NoC timeouts when host accesses DTCM */
	if (adreno_smmu_is_stalled(adreno_dev)) {
		dev_err(&gmu->pdev->dev,
			"Not dumping dtcm because SMMU is stalled\n");
		return;
	}

dtcm:
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
		snapshot, a6xx_gmu_snapshot_dtcm, gmu);
}