// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "adreno.h"
#include "adreno_gen8.h"
#include "adreno_gen8_gmu.h"
#include "adreno_gen8_3_0_snapshot.h"
#include "adreno_snapshot.h"
#include "gen8_reg.h"
#include "kgsl_device.h"
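
/*
 * gen8_snapshot_gmu_mem() - Dump a GMU memory region into the snapshot
 * @device: KGSL device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes remaining in the snapshot buffer
 * @priv: Pointer to a struct gmu_mem_type_desc describing the region
 *
 * Write a kgsl_snapshot_gmu_mem header followed by the contents of the
 * region. Return the number of bytes written, or 0 if the region has no
 * CPU mapping or the buffer is too small.
 */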
size_t gen8_snapshot_gmu_mem(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_gmu_mem *mem_hdr =
		(struct kgsl_snapshot_gmu_mem *)buf;
	u32 *data = (u32 *)(buf + sizeof(*mem_hdr));
	struct gmu_mem_type_desc *desc = priv;

	if (priv == NULL || desc->memdesc->hostptr == NULL)
		return 0;

	if (remain < desc->memdesc->size + sizeof(*mem_hdr)) {
		dev_err(device->dev,
			"snapshot: Not enough memory for the gmu section %d\n",
			desc->type);
		return 0;
	}

	mem_hdr->type = desc->type;
	mem_hdr->hostaddr = (u64)(uintptr_t)desc->memdesc->hostptr;
	mem_hdr->gmuaddr = desc->memdesc->gmuaddr;
	mem_hdr->gpuaddr = 0;

	/* The hw fence queues are mapped as iomem in the kernel */
	if (desc->type == SNAPSHOT_GMU_MEM_HW_FENCE)
		memcpy_fromio(data, desc->memdesc->hostptr, desc->memdesc->size);
	else
		memcpy(data, desc->memdesc->hostptr, desc->memdesc->size);

	return desc->memdesc->size + sizeof(*mem_hdr);
}
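
/*
 * gen8_gmu_snapshot_dtcm() - Dump the GMU DTCM into the snapshot
 * @device: KGSL device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes remaining in the snapshot buffer
 * @priv: Pointer to the gen8 GMU device
 *
 * The DTCM is not copied from a CPU mapping; it is read one dword at a
 * time through the CX debug controller's TCM debug interface.
 */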
static size_t gen8_gmu_snapshot_dtcm(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_gmu_mem *mem_hdr =
		(struct kgsl_snapshot_gmu_mem *)buf;
	struct gen8_gmu_device *gmu = (struct gen8_gmu_device *)priv;
	u32 *data = (u32 *)(buf + sizeof(*mem_hdr));
	u32 i;

	if (remain < gmu->vma[GMU_DTCM].size + sizeof(*mem_hdr)) {
		SNAPSHOT_ERR_NOMEM(device, "GMU DTCM Memory");
		return 0;
	}

	mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
	mem_hdr->hostaddr = 0;
	mem_hdr->gmuaddr = gmu->vma[GMU_DTCM].start;
	mem_hdr->gpuaddr = 0;

	/*
	 * Reads of the GMU TCMs over the side-band debug controller
	 * interface are supported on the gen8 family.
	 * region [20]: Dump ITCM/DTCM. Select 1 for DTCM.
	 * autoInc [31]: Autoincrement the address field after each
	 * access to TCM_DBG_DATA
	 */
	kgsl_regwrite(device, GEN8_CX_DBGC_TCM_DBG_ADDR, BIT(20) | BIT(31));

	for (i = 0; i < (gmu->vma[GMU_DTCM].size >> 2); i++)
		kgsl_regread(device, GEN8_CX_DBGC_TCM_DBG_DATA, data++);

	return gmu->vma[GMU_DTCM].size + sizeof(*mem_hdr);
}
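
/*
 * gen8_gmu_snapshot_itcm() - Dump the GMU ITCM shadow into the snapshot
 * @device: KGSL device being snapshotted
 * @buf: Destination snapshot buffer
 * @remain: Number of bytes remaining in the snapshot buffer
 * @priv: Pointer to the gen8 GMU device
 *
 * Unlike the DTCM, the ITCM contents come from a shadow buffer captured
 * earlier; the section is skipped if no shadow was allocated.
 */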
static size_t gen8_gmu_snapshot_itcm(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_gmu_mem *mem_hdr =
		(struct kgsl_snapshot_gmu_mem *)buf;
	void *dest = buf + sizeof(*mem_hdr);
	struct gen8_gmu_device *gmu = (struct gen8_gmu_device *)priv;

	if (!gmu->itcm_shadow) {
		dev_err(&gmu->pdev->dev, "No memory allocated for ITCM shadow capture\n");
		return 0;
	}

	if (remain < gmu->vma[GMU_ITCM].size + sizeof(*mem_hdr)) {
		SNAPSHOT_ERR_NOMEM(device, "GMU ITCM Memory");
		return 0;
	}

	mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
	mem_hdr->hostaddr = 0;
	mem_hdr->gmuaddr = gmu->vma[GMU_ITCM].start;
	mem_hdr->gpuaddr = 0;

	memcpy(dest, gmu->itcm_shadow, gmu->vma[GMU_ITCM].size);

	return gmu->vma[GMU_ITCM].size + sizeof(*mem_hdr);
}
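
/*
 * gen8_gmu_snapshot_memories() - Snapshot the global GMU allocations
 * @device: KGSL device being snapshotted
 * @gmu: Pointer to the gen8 GMU device
 * @snapshot: Pointer to the snapshot instance
 *
 * Add one GMU_MEMORY section per non-empty global GMU buffer, tagging
 * each with its memory type (HFI, log, debug dump, etc.).
 */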
static void gen8_gmu_snapshot_memories(struct kgsl_device *device,
	struct gen8_gmu_device *gmu, struct kgsl_snapshot *snapshot)
{
	struct gmu_mem_type_desc desc;
	struct kgsl_memdesc *md;
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->gmu_globals); i++) {
		md = &gmu->gmu_globals[i];
		if (!md->size)
			continue;

		desc.memdesc = md;
		if (md == gmu->hfi.hfi_mem)
			desc.type = SNAPSHOT_GMU_MEM_HFI;
		else if (md == gmu->gmu_log)
			desc.type = SNAPSHOT_GMU_MEM_LOG;
		else if (md == gmu->dump_mem)
			desc.type = SNAPSHOT_GMU_MEM_DEBUG;
		else if ((md == gmu->gmu_init_scratch) || (md == gmu->gpu_boot_scratch))
			desc.type = SNAPSHOT_GMU_MEM_WARMBOOT;
		else if (md == gmu->vrb)
			desc.type = SNAPSHOT_GMU_MEM_VRB;
		else if (md == gmu->trace.md)
			desc.type = SNAPSHOT_GMU_MEM_TRACE;
		else
			desc.type = SNAPSHOT_GMU_MEM_BIN_BLOCK;

		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
			snapshot, gen8_snapshot_gmu_mem, &desc);
	}
}
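
/* One GMU version word, dumped as a single-dword DEBUG section */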
struct kgsl_snapshot_gmu_version {
	u32 type;
	u32 value;
};

static size_t gen8_snapshot_gmu_version(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	u32 *data = (u32 *)(buf + sizeof(*header));
	struct kgsl_snapshot_gmu_version *ver = priv;

	if (remain < DEBUG_SECTION_SZ(1)) {
		SNAPSHOT_ERR_NOMEM(device, "GMU Version");
		return 0;
	}

	header->type = ver->type;
	header->size = 1;
	*data = ver->value;

	return DEBUG_SECTION_SZ(1);
}
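
/*
 * gen8_gmu_snapshot_versions() - Snapshot the GMU firmware versions
 * @device: KGSL device being snapshotted
 * @gmu: Pointer to the gen8 GMU device
 * @snapshot: Pointer to the snapshot instance
 *
 * Emit one DEBUG section for each of the core, core dev, power,
 * power dev and HFI version words.
 */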
static void gen8_gmu_snapshot_versions(struct kgsl_device *device,
	struct gen8_gmu_device *gmu,
	struct kgsl_snapshot *snapshot)
{
	int i;

	struct kgsl_snapshot_gmu_version gmu_vers[] = {
		{ .type = SNAPSHOT_DEBUG_GMU_CORE_VERSION,
		  .value = gmu->ver.core, },
		{ .type = SNAPSHOT_DEBUG_GMU_CORE_DEV_VERSION,
		  .value = gmu->ver.core_dev, },
		{ .type = SNAPSHOT_DEBUG_GMU_PWR_VERSION,
		  .value = gmu->ver.pwr, },
		{ .type = SNAPSHOT_DEBUG_GMU_PWR_DEV_VERSION,
		  .value = gmu->ver.pwr_dev, },
		{ .type = SNAPSHOT_DEBUG_GMU_HFI_VERSION,
		  .value = gmu->ver.hfi, },
	};

	for (i = 0; i < ARRAY_SIZE(gmu_vers); i++)
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
			snapshot, gen8_snapshot_gmu_version,
			&gmu_vers[i]);
}
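
/*
 * The offsets in the RSCC register list appear to be absolute dword
 * offsets, while gmu->rscc_virt maps the RSCC block starting at dword
 * offset RSCC_OFFSET_DWORDS; the base is therefore subtracted (and the
 * result converted to a byte offset) before indexing the mapping.
 */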
#define RSCC_OFFSET_DWORDS 0x14000

static size_t gen8_snapshot_rscc_registers(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv)
{
	const u32 *regs = priv;
	u32 *data = (u32 *)buf;
	int count = 0, k;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);

	/* Figure out how many registers we are going to dump */
	count = adreno_snapshot_regs_count(regs);

	if (remain < (count * 4)) {
		SNAPSHOT_ERR_NOMEM(device, "RSCC REGISTERS");
		return 0;
	}
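
	/*
	 * The list is a sequence of [start, end] dword pairs terminated by
	 * UINT_MAX. A single register is emitted as its offset (tagged
	 * with BIT(31)) followed by its value; a range is emitted as the
	 * start offset and dword count followed by the values.
	 */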
	for (regs = priv; regs[0] != UINT_MAX; regs += 2) {
		u32 cnt = REG_COUNT(regs);

		if (cnt == 1) {
			*data++ = BIT(31) | regs[0];
			*data++ = __raw_readl(gmu->rscc_virt +
				((regs[0] - RSCC_OFFSET_DWORDS) << 2));
			continue;
		}

		*data++ = regs[0];
		*data++ = cnt;
		for (k = regs[0]; k <= regs[1]; k++)
			*data++ = __raw_readl(gmu->rscc_virt +
				((k - RSCC_OFFSET_DWORDS) << 2));
	}

	/* Return the size of the section */
	return (count * 4);
}

/*
 * gen8_gmu_device_snapshot() - GEN8 GMU snapshot function
 * @device: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the GEN8 GMU specific bits and pieces are grabbed
 * into the snapshot memory
 */
static void gen8_gmu_device_snapshot(struct kgsl_device *device,
	struct kgsl_snapshot *snapshot)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gen8_gmu_device *gmu = to_gen8_gmu(adreno_dev);
	const struct adreno_gen8_core *gpucore = to_gen8_core(ADRENO_DEVICE(device));
	const struct gen8_snapshot_block_list *gen8_snapshot_block_list =
		gpucore->gen8_snapshot_block_list;
	u32 i, slice, j;
	struct gen8_reg_list_info info = {0};

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
		snapshot, gen8_gmu_snapshot_itcm, gmu);

	gen8_gmu_snapshot_versions(device, gmu, snapshot);

	gen8_gmu_snapshot_memories(device, gmu, snapshot);

	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS_V2, snapshot,
		gen8_snapshot_rscc_registers,
		(void *)gen8_snapshot_block_list->rscc_regs);

	/* Capture GMU registers which are on CX domain and unsliced */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS_V2, snapshot,
		adreno_snapshot_registers_v2,
		(void *)gen8_snapshot_block_list->gmu_cx_unsliced_regs);
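
	/*
	 * GX-domain registers can only be read while the RPMh power state
	 * is active and the GX rail is on; otherwise skip straight to the
	 * DTCM dump.
	 */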
	if (!gen8_gmu_rpmh_pwr_state_is_active(device) ||
	    !gen8_gmu_gx_is_on(adreno_dev))
		goto dtcm;

	/* Set fence to ALLOW mode so registers can be read */
	kgsl_regwrite(device, GEN8_GMUAO_AHB_FENCE_CTRL, 0);

	/* Capture GMU registers which are on GX domain */
	for (i = 0; i < gen8_snapshot_block_list->num_gmu_gx_regs; i++) {
		struct gen8_reg_list *regs = &gen8_snapshot_block_list->gmu_gx_regs[i];

		slice = regs->slice_region ? MAX_PHYSICAL_SLICES : 1;
		for (j = 0; j < slice; j++) {
			info.regs = regs;
			info.slice_id = SLICE_ID(regs->slice_region, j);
			kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_MVC_V3,
				snapshot, gen8_legacy_snapshot_registers, &info);
		}
	}

dtcm:
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
		snapshot, gen8_gmu_snapshot_dtcm, gmu);
}
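
/*
 * gen8_gmu_snapshot() - Snapshot the GMU and GPU state
 * @adreno_dev: Pointer to the Adreno device
 * @snapshot: Pointer to the snapshot instance
 *
 * After dumping the external, GMU and GPU state, clear any pending
 * GMU-to-host interrupts and restore the HFI interrupt mask.
 */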
void gen8_gmu_snapshot(struct adreno_device *adreno_dev,
	struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/*
	 * Dump the external registers first so that GPUCC and other
	 * external registers are present in the snapshot and the system
	 * state can be analyzed even from a partial dump.
	 */
	gen8_snapshot_external_core_regs(device, snapshot);

	gen8_gmu_device_snapshot(device, snapshot);

	gen8_snapshot(adreno_dev, snapshot);

	gmu_core_regwrite(device, GEN8_GMUCX_GMU2HOST_INTR_CLR, UINT_MAX);
	gmu_core_regwrite(device, GEN8_GMUCX_GMU2HOST_INTR_MASK, HFI_IRQ_MASK);
}