adreno_gen8_snapshot.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "adreno.h"
#include "adreno_gen8_3_0_snapshot.h"
#include "adreno_snapshot.h"

static struct kgsl_memdesc *gen8_capturescript;
static struct kgsl_memdesc *gen8_crashdump_registers;
static u32 *gen8_cd_reg_end;
static const struct gen8_snapshot_block_list *gen8_snapshot_block_list;
static bool gen8_crashdump_timedout;

/* Starting kernel virtual address for QDSS TMC register block */
static void __iomem *tmc_virt;

const struct gen8_snapshot_block_list gen8_3_0_snapshot_block_list = {
	.pre_crashdumper_regs = gen8_3_0_ahb_registers,
	.num_pre_crashdumper_regs = ARRAY_SIZE(gen8_3_0_ahb_registers),
	.debugbus_blocks = gen8_3_0_debugbus_blocks,
	.debugbus_blocks_len = ARRAY_SIZE(gen8_3_0_debugbus_blocks),
	.gbif_debugbus_blocks = gen8_gbif_debugbus_blocks,
	.gbif_debugbus_blocks_len = ARRAY_SIZE(gen8_gbif_debugbus_blocks),
	.cx_debugbus_blocks = gen8_cx_debugbus_blocks,
	.cx_debugbus_blocks_len = ARRAY_SIZE(gen8_cx_debugbus_blocks),
	.external_core_regs = gen8_3_0_external_core_regs,
	.num_external_core_regs = ARRAY_SIZE(gen8_3_0_external_core_regs),
	.gmu_cx_unsliced_regs = gen8_3_0_gmu_registers,
	.gmu_gx_regs = gen8_3_0_gmu_gx_regs,
	.num_gmu_gx_regs = ARRAY_SIZE(gen8_3_0_gmu_gx_regs),
	.rscc_regs = gen8_3_0_rscc_rsc_registers,
	.reg_list = gen8_3_0_reg_list,
	.cx_misc_regs = gen8_3_0_cx_misc_registers,
	.shader_blocks = gen8_3_0_shader_blocks,
	.num_shader_blocks = ARRAY_SIZE(gen8_3_0_shader_blocks),
	.cp_clusters = gen8_3_0_cp_clusters,
	.num_cp_clusters = ARRAY_SIZE(gen8_3_0_cp_clusters),
	.clusters = gen8_3_0_mvc_clusters,
	.num_clusters = ARRAY_SIZE(gen8_3_0_mvc_clusters),
	.sptp_clusters = gen8_3_0_sptp_clusters,
	.num_sptp_clusters = ARRAY_SIZE(gen8_3_0_sptp_clusters),
	.index_registers = gen8_3_0_cp_indexed_reg_list,
	.index_registers_len = ARRAY_SIZE(gen8_3_0_cp_indexed_reg_list),
	.mempool_index_registers = gen8_3_0_cp_mempool_reg_list,
	.mempool_index_registers_len = ARRAY_SIZE(gen8_3_0_cp_mempool_reg_list),
};

#define GEN8_SP_READ_SEL_VAL(_sliceid, _location, _pipe, _statetype, _usptp, _sptp) \
	(FIELD_PREP(GENMASK(25, 21), _sliceid) | \
	 FIELD_PREP(GENMASK(20, 18), _location) | \
	 FIELD_PREP(GENMASK(17, 16), _pipe) | \
	 FIELD_PREP(GENMASK(15, 8), _statetype) | \
	 FIELD_PREP(GENMASK(7, 4), _usptp) | \
	 FIELD_PREP(GENMASK(3, 0), _sptp))

#define GEN8_CP_APERTURE_REG_VAL(_sliceid, _pipe, _cluster, _context) \
	(FIELD_PREP(GENMASK(23, 23), 1) | \
	 FIELD_PREP(GENMASK(18, 16), _sliceid) | \
	 FIELD_PREP(GENMASK(15, 12), _pipe) | \
	 FIELD_PREP(GENMASK(11, 8), _cluster) | \
	 FIELD_PREP(GENMASK(5, 4), _context))
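
/*
 * Illustrative expansion of the encodings above (a sketch with
 * hypothetical field values, not taken from the original source):
 * GEN8_SP_READ_SEL_VAL(1, 2, 1, 0x21, 3, 0) packs
 *
 *	(1 << 21) | (2 << 18) | (1 << 16) | (0x21 << 8) | (3 << 4) | 0
 *		= 0x00292130
 *
 * i.e. each argument is shifted into its GENMASK field and OR'd together.
 * GEN8_CP_APERTURE_REG_VAL works the same way, with bit 23 always set.
 */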
#define GEN8_DEBUGBUS_SECTION_SIZE (sizeof(struct kgsl_snapshot_debugbus) \
			+ (GEN8_DEBUGBUS_BLOCK_SIZE << 3))
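
/*
 * Size sketch: each of the GEN8_DEBUGBUS_BLOCK_SIZE indices yields two
 * u32 reads (TRACE_BUF2 and TRACE_BUF1 in the bus-read helpers below),
 * i.e. 8 bytes per index, hence the << 3 on top of the header size.
 */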
#define CD_REG_END 0xaaaaaaaa

static u32 CD_WRITE(u64 *ptr, u32 offset, u64 val)
{
	ptr[0] = val;
	ptr[1] = FIELD_PREP(GENMASK(63, 44), offset) | BIT(21) | BIT(0);

	return 2;
}

static u32 CD_READ(u64 *ptr, u32 offset, u32 size, u64 target)
{
	ptr[0] = target;
	ptr[1] = FIELD_PREP(GENMASK(63, 44), offset) | size;

	return 2;
}

static void CD_FINISH(u64 *ptr, u32 offset)
{
	gen8_cd_reg_end = gen8_crashdump_registers->hostptr + offset;
	*gen8_cd_reg_end = CD_REG_END;
	ptr[0] = gen8_crashdump_registers->gpuaddr + offset;
	ptr[1] = FIELD_PREP(GENMASK(63, 44), GEN8_CP_CRASH_DUMP_STATUS) | BIT(0);
	ptr[2] = 0;
	ptr[3] = 0;
}
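
/*
 * Illustrative layout of a capture script built from the helpers above
 * (a sketch reconstructed from the helpers, not from the original
 * source). Each CD_WRITE/CD_READ emits a two-qword entry and CD_FINISH
 * terminates the script:
 *
 *	qword[0] = value to write            <- CD_WRITE(ptr, reg, val)
 *	qword[1] = reg << 44 | BIT(21) | BIT(0)
 *	qword[2] = target gpuaddr            <- CD_READ(ptr, reg, size, target)
 *	qword[3] = reg << 44 | size
 *	...
 *	qword[n]   = gpuaddr of end marker   <- CD_FINISH(ptr, offset)
 *	qword[n+1] = GEN8_CP_CRASH_DUMP_STATUS << 44 | BIT(0)
 *	qword[n+2] = 0, qword[n+3] = 0       (script terminator)
 *
 * CD_FINISH pre-writes CD_REG_END at the destination; the final script
 * entry makes the dumper overwrite that marker with the CP status, which
 * is how the poll loop in _gen8_do_crashdump() detects completion.
 */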
static bool CD_SCRIPT_CHECK(struct kgsl_device *device)
{
	return (adreno_smmu_is_stalled(ADRENO_DEVICE(device)) ||
		(!device->snapshot_crashdumper) ||
		IS_ERR_OR_NULL(gen8_capturescript) ||
		IS_ERR_OR_NULL(gen8_crashdump_registers) ||
		gen8_crashdump_timedout);
}

static bool _gen8_do_crashdump(struct kgsl_device *device)
{
	u32 reg = 0;
	ktime_t timeout;

	if (CD_SCRIPT_CHECK(device))
		return false;

	kgsl_regwrite(device, GEN8_CP_CRASH_DUMP_SCRIPT_BASE_LO,
			lower_32_bits(gen8_capturescript->gpuaddr));
	kgsl_regwrite(device, GEN8_CP_CRASH_DUMP_SCRIPT_BASE_HI,
			upper_32_bits(gen8_capturescript->gpuaddr));
	kgsl_regwrite(device, GEN8_CP_CRASH_DUMP_CNTL, 1);

	timeout = ktime_add_ms(ktime_get(), CP_CRASH_DUMPER_TIMEOUT);

	if (!device->snapshot_atomic)
		might_sleep();

	for (;;) {
		/* Make sure we are reading the latest value */
		rmb();
		if ((*gen8_cd_reg_end) != CD_REG_END)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0)
			break;
		/* Sleep for up to 1 msec to avoid unnecessary looping */
		if (!device->snapshot_atomic)
			usleep_range(100, 1000);
	}

	kgsl_regread(device, GEN8_CP_CRASH_DUMP_STATUS, &reg);

	/*
	 * Writing to GEN8_CP_CRASH_DUMP_CNTL also resets
	 * GEN8_CP_CRASH_DUMP_STATUS. Make sure the read above is
	 * complete before we change the value.
	 */
	rmb();

	kgsl_regwrite(device, GEN8_CP_CRASH_DUMP_CNTL, 0);

	if (WARN(!(reg & 0x2), "Crashdumper timed out\n")) {
		/*
		 * The Gen8 crash dumper script is broken down into multiple
		 * chunks, and the script is invoked multiple times to capture
		 * snapshots of different sections of the GPU. If the
		 * crashdumper fails once, it is highly likely to fail
		 * subsequently as well. Hence, set gen8_crashdump_timedout
		 * to avoid running the crashdumper after it fails once.
		 */
		gen8_crashdump_timedout = true;
		return false;
	}

	return true;
}
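
/*
 * A minimal sketch of the pattern the snapshot routines below follow
 * (illustrative only; the names match this file, the control flow is
 * condensed):
 *
 *	u64 *ptr = gen8_capturescript->hostptr;
 *
 *	ptr += CD_WRITE(ptr, sel_reg, sel_val);		// program aperture
 *	ptr += CD_READ(ptr, first_reg, count,
 *		       gen8_crashdump_registers->gpuaddr + offset);
 *	CD_FINISH(ptr, offset);				// end-of-script marker
 *
 *	func = legacy_callback;				// AHB reads from the CPU
 *	if (_gen8_do_crashdump(device))
 *		func = crashdumper_callback;		// parse the dumped buffer
 *	kgsl_snapshot_add_section(device, id, snapshot, func, &info);
 */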
size_t gen8_legacy_snapshot_registers(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct gen8_reg_list_info *info = (struct gen8_reg_list_info *)priv;
	const u32 *ptr = info->regs->regs;
	struct kgsl_snapshot_mvc_regs_v3 *header =
			(struct kgsl_snapshot_mvc_regs_v3 *)buf;
	u32 *data = (u32 *)(buf + sizeof(*header));
	u32 size = (adreno_snapshot_regs_count(ptr) * sizeof(*data)) + sizeof(*header);
	u32 count, k;

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->ctxt_id = 0;
	header->cluster_id = CLUSTER_NONE;
	header->pipe_id = PIPE_NONE;
	header->location_id = UINT_MAX;
	header->sp_id = UINT_MAX;
	header->usptp_id = UINT_MAX;
	header->slice_id = info->slice_id;

	if (info->regs->sel)
		kgsl_regwrite(device, info->regs->sel->host_reg, info->regs->sel->val);

	if (info->regs->slice_region)
		kgsl_regwrite(device, GEN8_CP_APERTURE_CNTL_HOST, GEN8_CP_APERTURE_REG_VAL
			(info->slice_id, 0, 0, 0));

	/* Make sure the previous writes are posted before reading */
	mb();

	for (ptr = info->regs->regs; ptr[0] != UINT_MAX; ptr += 2) {
		count = REG_COUNT(ptr);

		if (count == 1)
			*data++ = ptr[0];
		else {
			*data++ = ptr[0] | (1 << 31);
			*data++ = ptr[1];
		}
		for (k = ptr[0]; k <= ptr[1]; k++)
			kgsl_regread(device, k, data++);
	}

	return size;
}

static size_t gen8_snapshot_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct gen8_reg_list_info *info = (struct gen8_reg_list_info *)priv;
	const u32 *ptr = info->regs->regs;
	struct kgsl_snapshot_mvc_regs_v3 *header =
			(struct kgsl_snapshot_mvc_regs_v3 *)buf;
	u32 *data = (u32 *)(buf + sizeof(*header));
	u32 *src;
	u32 cnt;
	u32 size = (adreno_snapshot_regs_count(ptr) * sizeof(*data)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->ctxt_id = 0;
	header->cluster_id = CLUSTER_NONE;
	header->pipe_id = PIPE_NONE;
	header->location_id = UINT_MAX;
	header->sp_id = UINT_MAX;
	header->usptp_id = UINT_MAX;
	header->slice_id = info->slice_id;

	src = gen8_crashdump_registers->hostptr + info->offset;

	for (ptr = info->regs->regs; ptr[0] != UINT_MAX; ptr += 2) {
		cnt = REG_COUNT(ptr);

		if (cnt == 1)
			*data++ = ptr[0];
		else {
			*data++ = BIT(31) | ptr[0];
			*data++ = ptr[1];
		}
		memcpy(data, src, cnt << 2);
		data += cnt;
		src += cnt;
	}

	/* Return the size of the section */
	return size;
}
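
/*
 * Sketch of the payload layout both register dumpers above emit
 * (illustrative, reconstructed from the loops; the addresses and values
 * are made up): a single register is stored as [addr][value], while a
 * register range sets BIT(31) on the start address and is stored as
 * [start | BIT(31)][end][values...]:
 *
 *	0x00000a00, 0x12345678,				// single register
 *	0x80000b00, 0x00000b03, v0, v1, v2, v3		// range 0x0b00..0x0b03
 */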
static size_t gen8_legacy_snapshot_shader(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader_v3 *header =
			(struct kgsl_snapshot_shader_v3 *) buf;
	struct gen8_shader_block_info *info = (struct gen8_shader_block_info *) priv;
	struct gen8_shader_block *block = info->block;
	u32 *data = (u32 *)(buf + sizeof(*header));
	u32 read_sel, i;

	if (remain < (sizeof(*header) + (block->size << 2))) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->slice_id = info->slice_id;
	header->sp_index = info->sp_id;
	header->usptp = info->usptp;
	header->pipe_id = block->pipeid;
	header->location = block->location;
	header->ctxt_id = 1;
	header->size = block->size;

	read_sel = GEN8_SP_READ_SEL_VAL(info->slice_id, block->location, block->pipeid,
			block->statetype, info->usptp, info->sp_id);

	kgsl_regwrite(device, GEN8_SP_READ_SEL, read_sel);

	/*
	 * An explicit barrier is needed so that reads do not happen before
	 * the register write.
	 */
	mb();

	for (i = 0; i < block->size; i++)
		data[i] = kgsl_regmap_read(&device->regmap, GEN8_SP_AHB_READ_APERTURE + i);

	return (sizeof(*header) + (block->size << 2));
}

static size_t gen8_snapshot_shader_memory(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_shader_v3 *header =
			(struct kgsl_snapshot_shader_v3 *) buf;
	struct gen8_shader_block_info *info = (struct gen8_shader_block_info *) priv;
	struct gen8_shader_block *block = info->block;
	u32 *data = (u32 *) (buf + sizeof(*header));

	if (remain < (sizeof(*header) + (block->size << 2))) {
		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
		return 0;
	}

	header->type = block->statetype;
	header->slice_id = info->slice_id;
	header->sp_index = info->sp_id;
	header->usptp = info->usptp;
	header->pipe_id = block->pipeid;
	header->location = block->location;
	header->ctxt_id = 1;
	header->size = block->size;

	memcpy(data, gen8_crashdump_registers->hostptr + info->offset,
			(block->size << 2));

	return (sizeof(*header) + (block->size << 2));
}

static void qdss_regwrite(void __iomem *regbase, u32 offsetbytes, u32 value)
{
	void __iomem *reg;

	reg = regbase + offsetbytes;

	/* Ensure the previous write is committed */
	wmb();
	__raw_writel(value, reg);
}

static u32 qdss_regread(void __iomem *regbase, u32 offsetbytes)
{
	void __iomem *reg;
	u32 val;

	reg = regbase + offsetbytes;
	val = __raw_readl(reg);

	/* Make sure memory is updated before the next access */
	rmb();
	return val;
}
static size_t gen8_snapshot_trace_buffer_gfx_trace(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	u32 start_idx = 0, status = 0, count = 0, wrap_count = 0, write_ptr = 0;
	struct kgsl_snapshot_trace_buffer *header =
			(struct kgsl_snapshot_trace_buffer *) buf;
	u32 *data = (u32 *)(buf + sizeof(*header));
	struct gen8_trace_buffer_info *info =
			(struct gen8_trace_buffer_info *) priv;

	if (remain < SZ_2K + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "TRACE 2K BUFFER");
		return 0;
	}

	memcpy(header->ping_blk, info->ping_blk, sizeof(header->ping_blk));
	memcpy(header->ping_idx, info->ping_idx, sizeof(header->ping_idx));
	header->granularity = info->granularity;
	header->segment = info->segment;
	header->dbgc_ctrl = info->dbgc_ctrl;

	/* Read the trace buffer status to determine if it is full or empty */
	kgsl_regread(device, GEN8_DBGC_TRACE_BUFFER_STATUS, &status);

	/*
	 * wrap_count and write_ptr are packed into status:
	 * if status is 0, wrap_count and write_ptr are both 0 and the
	 * buffer is empty; if status is non-zero and wrap_count is 0,
	 * read a partial buffer; if wrap_count is non-zero, read the
	 * entire 2k buffer. Always read the oldest data available.
	 */

	/* If status is 0 the buffer is empty */
	if (!status) {
		header->size = 0;
		return sizeof(*header);
	}

	/* Number of times the circular buffer has wrapped around */
	wrap_count = FIELD_GET(GENMASK(31, 12), status);
	write_ptr = FIELD_GET(GENMASK(8, 0), status);

	if (!wrap_count) {
		/* Read a partial buffer starting from index 0 */
		/* Number of dwords to read: write_ptr - 0 of the indexed register */
		count = write_ptr;
		header->size = count << 2;
		start_idx = 0;
	} else {
		/* Read the entire 2k buffer starting from write_ptr */
		start_idx = write_ptr + 1;
		count = SZ_512;
		header->size = SZ_2K;
	}

	kgsl_regmap_read_indexed_interleaved(&device->regmap,
		GEN8_DBGC_DBG_TRACE_BUFFER_RD_ADDR, GEN8_DBGC_DBG_TRACE_BUFFER_RD_DATA, data,
		start_idx, count);

	return (sizeof(*header) + header->size);
}
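
/*
 * Worked example with a hypothetical status value (not from the source):
 * status = 0x00002095 decodes as wrap_count = FIELD_GET(GENMASK(31, 12),
 * status) = 0x2 and write_ptr = FIELD_GET(GENMASK(8, 0), status) = 0x95,
 * so the buffer has wrapped and the full 2k is read starting at
 * index write_ptr + 1 = 0x96.
 */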
static size_t gen8_snapshot_trace_buffer_etb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	u32 read_ptr, count, write_ptr, val, idx = 0;
	struct kgsl_snapshot_trace_buffer *header = (struct kgsl_snapshot_trace_buffer *) buf;
	u32 *data = (u32 *)(buf + sizeof(*header));
	struct gen8_trace_buffer_info *info = (struct gen8_trace_buffer_info *) priv;

	/* Unlock the ETB buffer */
	qdss_regwrite(tmc_virt, QDSS_AOSS_APB_TMC_LAR, 0xC5ACCE55);

	/* Make sure the unlock goes through before proceeding further */
	mb();

	/* Flush the QDSS pipeline to ensure completion of pending writes to the buffer */
	val = qdss_regread(tmc_virt, QDSS_AOSS_APB_TMC_FFCR);
	qdss_regwrite(tmc_virt, QDSS_AOSS_APB_TMC_FFCR, val | 0x40);

	/* Make sure the pipeline is flushed before we get the read and write pointers */
	mb();

	/* Disable the ETB */
	qdss_regwrite(tmc_virt, QDSS_AOSS_APB_TMC_CTRL, 0);

	/* Set to circular mode */
	qdss_regwrite(tmc_virt, QDSS_AOSS_APB_TMC_MODE, 0);

	/* Ensure the buffer is set to circular mode before accessing it */
	mb();

	/* The size of the buffer is specified in the TMC_RSZ register */
	count = qdss_regread(tmc_virt, QDSS_AOSS_APB_TMC_RSZ) << 2;

	read_ptr = qdss_regread(tmc_virt, QDSS_AOSS_APB_TMC_RRP);
	write_ptr = qdss_regread(tmc_virt, QDSS_AOSS_APB_TMC_RWP);

	/* If the ETB buffer is full, read_ptr equals write_ptr; otherwise write_ptr leads read_ptr */
	count = (read_ptr == write_ptr) ? count : (write_ptr - read_ptr);

	if (remain < count + sizeof(*header)) {
		SNAPSHOT_ERR_NOMEM(device, "ETB BUFFER");
		return 0;
	}

	/*
	 * The read pointer is 4-byte aligned and the write pointer is
	 * 2-byte aligned. We read 4 bytes of data in each iteration
	 * below, so align the count down to 4 bytes.
	 */
	count = ALIGN_DOWN(count, 4);
	header->size = count;
	header->dbgc_ctrl = info->dbgc_ctrl;
	memcpy(header->ping_blk, info->ping_blk, sizeof(header->ping_blk));
	memcpy(header->ping_idx, info->ping_idx, sizeof(header->ping_idx));
	header->granularity = info->granularity;
	header->segment = info->segment;

	while (count != 0) {
		/* This indexed register auto-increments the index as we read */
		data[idx++] = qdss_regread(tmc_virt, QDSS_AOSS_APB_TMC_RRD);
		count = count - 4;
	}

	return (sizeof(*header) + header->size);
}
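
/*
 * Worked example with hypothetical pointer values (not from the source):
 * read_ptr = 0x100 and write_ptr = 0x10a give count = 0xa bytes;
 * ALIGN_DOWN(0xa, 4) = 0x8, so the drain loop above performs two 4-byte
 * reads of TMC_RRD and the trailing 2 bytes are dropped.
 */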
static void gen8_snapshot_trace_buffer(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	u32 val_tmc_ctrl = 0, val_etr_ctrl = 0, val_etr1_ctrl = 0;
	u32 i = 0, sel_gx = 0, sel_cx = 0, val_gx = 0, val_cx = 0, val = 0;
	struct gen8_trace_buffer_info info;
	struct resource *res1, *res2;
	struct clk *clk;
	int ret;
	void __iomem *etr_virt;

	/*
	 * Data can be collected from either CX_DBGC or DBGC; they are
	 * mutually exclusive. Read the necessary select registers to
	 * determine the source of the data. This loop reads SEL_A through
	 * SEL_D of both CX_DBGC and DBGC and updates the trace buffer
	 * section header information accordingly.
	 */
	for (i = 0; i < TRACE_BUF_NUM_SIG; i++) {
		kgsl_regread(device, GEN8_DBGC_CFG_DBGBUS_SEL_A + i, &sel_gx);
		kgsl_regread(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_A + i, &sel_cx);
		val_gx |= sel_gx;
		val_cx |= sel_cx;
		info.ping_idx[i] = FIELD_GET(GENMASK(7, 0), (sel_gx | sel_cx));
		info.ping_blk[i] = FIELD_GET(GENMASK(24, 16), (sel_gx | sel_cx));
	}

	/* Zero the header if not programmed to export any buffer */
	if (!val_gx && !val_cx) {
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_TRACE_BUFFER,
			snapshot, NULL, &info);
		return;
	}

	/* Enable the APB clock to read data from the trace buffer */
	clk = clk_get(&device->pdev->dev, "apb_pclk");
	if (IS_ERR(clk)) {
		dev_err(device->dev, "Unable to get QDSS clock\n");
		return;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(device->dev, "QDSS Clock enable error: %d\n", ret);
		clk_put(clk);
		return;
	}

	res1 = platform_get_resource_byname(device->pdev, IORESOURCE_MEM, "qdss_etr");
	res2 = platform_get_resource_byname(device->pdev, IORESOURCE_MEM, "qdss_tmc");

	if (!res1 || !res2)
		goto err_clk_put;

	etr_virt = ioremap(res1->start, resource_size(res1));
	tmc_virt = ioremap(res2->start, resource_size(res2));

	if (!etr_virt || !tmc_virt)
		goto err_unmap;

	/*
	 * Update the header information based on the source of the data and
	 * read the necessary CNTLT registers for granularity and segment
	 * information.
	 */
	if (val_gx) {
		info.dbgc_ctrl = GX_DBGC;
		kgsl_regread(device, GEN8_DBGC_CFG_DBGBUS_CNTLT, &val);
	} else {
		info.dbgc_ctrl = CX_DBGC;
		kgsl_regread(device, GEN8_CX_DBGC_CFG_DBGBUS_CNTLT, &val);
	}

	info.granularity = FIELD_GET(GENMASK(14, 12), val);
	info.segment = FIELD_GET(GENMASK(31, 28), val);

	val_tmc_ctrl = qdss_regread(tmc_virt, QDSS_AOSS_APB_TMC_CTRL);

	/*
	 * In case TMC CTRL is 0 and val_cx is non-zero, dump an empty buffer.
	 * In case TMC CTRL is 0 and val_gx is non-zero, dump the 2k gfx
	 * buffer (the 2k buffer is not present for CX blocks).
	 * In case both ETRs' CTRL is 0, dump the ETB QDSS buffer and
	 * disable QDSS.
	 * In case either ETR's CTRL is 1, QDSS is dumping the ETB buffer
	 * to DDR, so just disable QDSS.
	 */
	if (!val_tmc_ctrl) {
		if (val_gx)
			kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_TRACE_BUFFER,
				snapshot, gen8_snapshot_trace_buffer_gfx_trace, &info);
		else
			kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_TRACE_BUFFER,
				snapshot, NULL, &info);
	} else {
		val_etr_ctrl = qdss_regread(etr_virt, QDSS_AOSS_APB_ETR_CTRL);
		val_etr1_ctrl = qdss_regread(etr_virt, QDSS_AOSS_APB_ETR1_CTRL);
		if (!val_etr_ctrl && !val_etr1_ctrl)
			kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_TRACE_BUFFER,
				snapshot, gen8_snapshot_trace_buffer_etb, &info);
		qdss_regwrite(tmc_virt, QDSS_AOSS_APB_TMC_CTRL, 0);
	}

err_unmap:
	iounmap(tmc_virt);
	iounmap(etr_virt);

err_clk_put:
	clk_disable_unprepare(clk);
	clk_put(clk);
}
static void gen8_snapshot_shader(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct gen8_shader_block_info info = {0};
	u64 *ptr;
	u32 offset = 0;
	struct gen8_shader_block *shader_blocks = gen8_snapshot_block_list->shader_blocks;
	size_t num_shader_blocks = gen8_snapshot_block_list->num_shader_blocks;
	u32 i, sp, usptp, slice;
	size_t (*func)(struct kgsl_device *device, u8 *buf, size_t remain,
		void *priv) = gen8_legacy_snapshot_shader;

	if (CD_SCRIPT_CHECK(device)) {
		for (i = 0; i < num_shader_blocks; i++) {
			struct gen8_shader_block *block = &shader_blocks[i];

			for (slice = 0; slice < block->num_slices; slice++) {
				for (sp = 0; sp < block->num_sps; sp++) {
					for (usptp = 0; usptp < block->num_usptps; usptp++) {
						info.block = block;
						info.sp_id = sp;
						info.usptp = usptp;
						info.slice_id = slice;
						info.offset = offset;
						offset += block->size << 2;

						/* Shader working/shadow memory */
						kgsl_snapshot_add_section(device,
							KGSL_SNAPSHOT_SECTION_SHADER_V3,
							snapshot, func, &info);
					}
				}
			}
		}
		return;
	}

	for (i = 0; i < num_shader_blocks; i++) {
		struct gen8_shader_block *block = &shader_blocks[i];

		/* Build the crash script */
		ptr = gen8_capturescript->hostptr;
		offset = 0;

		for (slice = 0; slice < block->num_slices; slice++) {
			for (sp = 0; sp < block->num_sps; sp++) {
				for (usptp = 0; usptp < block->num_usptps; usptp++) {
					/* Program the aperture */
					ptr += CD_WRITE(ptr, GEN8_SP_READ_SEL,
						GEN8_SP_READ_SEL_VAL(slice, block->location,
						block->pipeid, block->statetype, usptp, sp));

					/* Read all the data in one chunk */
					ptr += CD_READ(ptr, GEN8_SP_AHB_READ_APERTURE, block->size,
						gen8_crashdump_registers->gpuaddr + offset);
					offset += block->size << 2;
				}
			}
		}

		/* Marker for the end of the script */
		CD_FINISH(ptr, offset);

		/* Try to run the crash dumper */
		func = gen8_legacy_snapshot_shader;
		if (_gen8_do_crashdump(device))
			func = gen8_snapshot_shader_memory;

		offset = 0;
		for (slice = 0; slice < block->num_slices; slice++) {
			for (sp = 0; sp < block->num_sps; sp++) {
				for (usptp = 0; usptp < block->num_usptps; usptp++) {
					info.block = block;
					info.sp_id = sp;
					info.usptp = usptp;
					info.slice_id = slice;
					info.offset = offset;
					offset += block->size << 2;

					/* Shader working/shadow memory */
					kgsl_snapshot_add_section(device,
						KGSL_SNAPSHOT_SECTION_SHADER_V3, snapshot, func, &info);
				}
			}
		}
	}
}

static void gen8_rmw_aperture(struct kgsl_device *device,
		u32 offsetwords, u32 mask, u32 val, u32 pipe, u32 slice_id, u32 use_slice_id)
{
	gen8_host_aperture_set(ADRENO_DEVICE(device), pipe, slice_id, use_slice_id);

	kgsl_regmap_rmw(&device->regmap, offsetwords, mask, val);
}

static void gen8_snapshot_mempool(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	struct gen8_cp_indexed_reg *cp_indexed_reg;
	size_t mempool_index_registers_len = gen8_snapshot_block_list->mempool_index_registers_len;
	u32 i, j, slice;

	for (i = 0; i < mempool_index_registers_len; i++) {
		cp_indexed_reg = &gen8_snapshot_block_list->mempool_index_registers[i];
		slice = NUMBER_OF_SLICES(cp_indexed_reg->slice_region);

		for (j = 0; j < slice; j++) {
			/* Set CP_CHICKEN_DBG[StabilizeMVC] to stabilize it while dumping */
			gen8_rmw_aperture(device, GEN8_CP_CHICKEN_DBG_PIPE, 0x4, 0x4,
				cp_indexed_reg->pipe_id, 0, 0);

			gen8_rmw_aperture(device, GEN8_CP_SLICE_CHICKEN_DBG_PIPE, 0x4, 0x4,
				cp_indexed_reg->pipe_id, j, 1);

			kgsl_snapshot_indexed_registers_v2(device, snapshot,
				cp_indexed_reg->addr, cp_indexed_reg->data,
				0, cp_indexed_reg->size, cp_indexed_reg->pipe_id,
				SLICE_ID(cp_indexed_reg->slice_region, j));

			/* Reset CP_CHICKEN_DBG[StabilizeMVC] once we are done */
			gen8_rmw_aperture(device, GEN8_CP_CHICKEN_DBG_PIPE, 0x4, 0x0,
				cp_indexed_reg->pipe_id, 0, 0);

			gen8_rmw_aperture(device, GEN8_CP_SLICE_CHICKEN_DBG_PIPE, 0x4, 0x0,
				cp_indexed_reg->pipe_id, j, 1);
		}
	}

	/* Clear the aperture register */
	gen8_host_aperture_set(ADRENO_DEVICE(device), 0, 0, 0);
}
static u32 gen8_read_dbgahb(struct kgsl_device *device,
		u32 regbase, u32 reg)
{
	u32 val;

	kgsl_regread(device, (GEN8_SP_AHB_READ_APERTURE + reg - regbase), &val);
	return val;
}

static size_t gen8_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs_v3 *header =
			(struct kgsl_snapshot_mvc_regs_v3 *)buf;
	struct gen8_sptp_cluster_registers_info *info =
			(struct gen8_sptp_cluster_registers_info *)priv;
	const u32 *ptr = info->cluster->regs;
	u32 *data = (u32 *)(buf + sizeof(*header));
	u32 read_sel, j;
	u32 size = adreno_snapshot_regs_count(ptr) * sizeof(*data);

	if (remain < (sizeof(*header) + size)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	header->ctxt_id = info->context_id;
	header->cluster_id = info->cluster_id;
	header->pipe_id = info->pipe_id;
	header->location_id = info->location_id;
	header->sp_id = info->sp_id;
	header->usptp_id = info->usptp_id;
	header->slice_id = info->slice_id;

	read_sel = GEN8_SP_READ_SEL_VAL(info->slice_id, info->location_id,
			info->pipe_id, info->statetype_id, info->usptp_id, info->sp_id);

	kgsl_regwrite(device, GEN8_SP_READ_SEL, read_sel);

	/*
	 * An explicit barrier is needed so that reads do not happen before
	 * the register write.
	 */
	mb();

	for (; ptr[0] != UINT_MAX; ptr += 2) {
		u32 count = REG_COUNT(ptr);

		if (count == 1)
			*data++ = ptr[0];
		else {
			*data++ = ptr[0] | (1 << 31);
			*data++ = ptr[1];
		}
		for (j = ptr[0]; j <= ptr[1]; j++)
			*data++ = gen8_read_dbgahb(device, info->cluster->regbase, j);
	}

	return (size + sizeof(*header));
}

static size_t gen8_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs_v3 *header =
			(struct kgsl_snapshot_mvc_regs_v3 *)buf;
	struct gen8_sptp_cluster_registers_info *info =
			(struct gen8_sptp_cluster_registers_info *)priv;
	const u32 *ptr = info->cluster->regs;
	u32 *data = (u32 *)(buf + sizeof(*header));
	u32 *src;
	u32 size = adreno_snapshot_regs_count(ptr) * sizeof(*data);

	if (remain < (sizeof(*header) + size)) {
		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
		return 0;
	}

	header->ctxt_id = info->context_id;
	header->cluster_id = info->cluster_id;
	header->pipe_id = info->pipe_id;
	header->location_id = info->location_id;
	header->sp_id = info->sp_id;
	header->usptp_id = info->usptp_id;
	header->slice_id = info->slice_id;

	src = gen8_crashdump_registers->hostptr + info->offset;

	for (ptr = info->cluster->regs; ptr[0] != UINT_MAX; ptr += 2) {
		u32 cnt = REG_COUNT(ptr);

		if (cnt == 1)
			*data++ = ptr[0];
		else {
			*data++ = ptr[0] | (1 << 31);
			*data++ = ptr[1];
		}
		memcpy(data, src, cnt << 2);
		data += cnt;
		src += cnt;
	}

	return (size + sizeof(*header));
}
static void gen8_snapshot_dbgahb_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	u32 i, j, sp, usptp, count, slice;
	u64 *ptr, offset = 0;
	struct gen8_sptp_cluster_registers_info info = {0};
	struct gen8_sptp_cluster_registers *sptp_clusters = gen8_snapshot_block_list->sptp_clusters;
	size_t num_sptp_clusters = gen8_snapshot_block_list->num_sptp_clusters;
	size_t (*func)(struct kgsl_device *device, u8 *buf, size_t remain,
		void *priv) = gen8_legacy_snapshot_cluster_dbgahb;

	if (CD_SCRIPT_CHECK(device)) {
		for (i = 0; i < num_sptp_clusters; i++) {
			struct gen8_sptp_cluster_registers *cluster = &sptp_clusters[i];

			slice = NUMBER_OF_SLICES(cluster->slice_region);
			for (sp = 0; sp < cluster->num_sps; sp++) {
				for (usptp = 0; usptp < cluster->num_usptps; usptp++) {
					for (j = 0; j < slice; j++) {
						info.cluster = cluster;
						info.location_id = cluster->location_id;
						info.pipe_id = cluster->pipe_id;
						info.usptp_id = usptp;
						info.sp_id = sp;
						info.slice_id = SLICE_ID(cluster->slice_region, j);
						info.statetype_id = cluster->statetype;
						info.cluster_id = cluster->cluster_id;
						info.context_id = cluster->context_id;
						kgsl_snapshot_add_section(device,
							KGSL_SNAPSHOT_SECTION_MVC_V3, snapshot,
							func, &info);
					}
				}
			}
		}
		return;
	}

	for (i = 0; i < num_sptp_clusters; i++) {
		struct gen8_sptp_cluster_registers *cluster = &sptp_clusters[i];

		slice = NUMBER_OF_SLICES(cluster->slice_region);
		cluster->offset = offset;

		for (sp = 0; sp < cluster->num_sps; sp++) {
			for (usptp = 0; usptp < cluster->num_usptps; usptp++) {
				for (j = 0; j < slice; j++) {
					const u32 *regs = cluster->regs;

					info.cluster = cluster;
					info.location_id = cluster->location_id;
					info.pipe_id = cluster->pipe_id;
					info.usptp_id = usptp;
					info.sp_id = sp;
					info.slice_id = SLICE_ID(cluster->slice_region, j);
					info.statetype_id = cluster->statetype;
					info.cluster_id = cluster->cluster_id;
					info.context_id = cluster->context_id;
					info.offset = offset;

					/* Build the crash script */
					ptr = gen8_capturescript->hostptr;

					/* Program the aperture */
					ptr += CD_WRITE(ptr, GEN8_SP_READ_SEL, GEN8_SP_READ_SEL_VAL
						(j, cluster->location_id, cluster->pipe_id,
						 cluster->statetype, usptp, sp));

					for (; regs[0] != UINT_MAX; regs += 2) {
						count = REG_COUNT(regs);
						ptr += CD_READ(ptr, (GEN8_SP_AHB_READ_APERTURE +
							regs[0] - cluster->regbase), count,
							(gen8_crashdump_registers->gpuaddr +
							 offset));
						offset += count * sizeof(u32);
					}

					/* Marker for the end of the script */
					CD_FINISH(ptr, offset);

					func = gen8_legacy_snapshot_cluster_dbgahb;

					/* Try to run the crash dumper */
					if (_gen8_do_crashdump(device))
						func = gen8_snapshot_cluster_dbgahb;

					kgsl_snapshot_add_section(device,
						KGSL_SNAPSHOT_SECTION_MVC_V3, snapshot,
						func, &info);
				}
			}
		}
	}
}
static size_t gen8_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs_v3 *header =
			(struct kgsl_snapshot_mvc_regs_v3 *)buf;
	u32 *data = (u32 *)(buf + sizeof(*header));
	struct gen8_cluster_registers_info *info =
			(struct gen8_cluster_registers_info *)priv;
	const u32 *ptr = info->cluster->regs;
	u32 size = adreno_snapshot_regs_count(ptr) * sizeof(*data);
	u32 j;

	if (remain < (sizeof(*header) + size)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	header->ctxt_id = (info->context_id == STATE_FORCE_CTXT_1) ? 1 : 0;
	header->cluster_id = info->cluster_id;
	header->pipe_id = info->pipe_id;
	header->location_id = UINT_MAX;
	header->sp_id = UINT_MAX;
	header->usptp_id = UINT_MAX;
	header->slice_id = info->slice_id;

	/*
	 * Set the AHB control for the Host to read from the
	 * cluster/context for this iteration.
	 */
	kgsl_regwrite(device, GEN8_CP_APERTURE_CNTL_HOST, GEN8_CP_APERTURE_REG_VAL
		(info->slice_id, info->pipe_id, info->cluster_id, info->context_id));

	if (info->cluster->sel)
		kgsl_regwrite(device, info->cluster->sel->host_reg, info->cluster->sel->val);

	/* Make sure the previous writes are posted before reading */
	mb();

	for (; ptr[0] != UINT_MAX; ptr += 2) {
		u32 count = REG_COUNT(ptr);

		if (count == 1)
			*data++ = ptr[0];
		else {
			*data++ = ptr[0] | (1 << 31);
			*data++ = ptr[1];
		}
		for (j = ptr[0]; j <= ptr[1]; j++)
			kgsl_regread(device, j, data++);
	}

	return (size + sizeof(*header));
}

static size_t gen8_snapshot_mvc(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct kgsl_snapshot_mvc_regs_v3 *header =
			(struct kgsl_snapshot_mvc_regs_v3 *)buf;
	struct gen8_cluster_registers_info *info =
			(struct gen8_cluster_registers_info *)priv;
	const u32 *ptr = info->cluster->regs;
	u32 *data = (u32 *)(buf + sizeof(*header));
	u32 *src;
	u32 cnt;
	u32 size = adreno_snapshot_regs_count(ptr) * sizeof(*data);

	if (remain < (sizeof(*header) + size)) {
		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
		return 0;
	}

	header->ctxt_id = (info->context_id == STATE_FORCE_CTXT_1) ? 1 : 0;
	header->cluster_id = info->cluster_id;
	header->pipe_id = info->pipe_id;
	header->location_id = UINT_MAX;
	header->sp_id = UINT_MAX;
	header->usptp_id = UINT_MAX;
	header->slice_id = info->slice_id;

	src = gen8_crashdump_registers->hostptr + info->offset;

	for (; ptr[0] != UINT_MAX; ptr += 2) {
		cnt = REG_COUNT(ptr);

		if (cnt == 1)
			*data++ = ptr[0];
		else {
			*data++ = ptr[0] | (1 << 31);
			*data++ = ptr[1];
		}
		memcpy(data, src, cnt << 2);
		src += cnt;
		data += cnt;
	}

	return (size + sizeof(*header));
}

static void gen8_snapshot_mvc_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		struct gen8_cluster_registers *clusters,
		size_t num_cluster)
{
	u32 i, j;
	u64 *ptr, offset = 0;
	u32 count, slice;
	struct gen8_cluster_registers_info info = {0};
	size_t (*func)(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv) = gen8_legacy_snapshot_mvc;

	if (CD_SCRIPT_CHECK(device)) {
		for (i = 0; i < num_cluster; i++) {
			struct gen8_cluster_registers *cluster = &clusters[i];

			slice = NUMBER_OF_SLICES(cluster->slice_region);
			for (j = 0; j < slice; j++) {
				info.cluster = cluster;
				info.pipe_id = cluster->pipe_id;
				info.cluster_id = cluster->cluster_id;
				info.context_id = cluster->context_id;
				info.slice_id = SLICE_ID(cluster->slice_region, j);
				kgsl_snapshot_add_section(device,
					KGSL_SNAPSHOT_SECTION_MVC_V3, snapshot, func, &info);
			}
		}
		return;
	}

	for (i = 0; i < num_cluster; i++) {
		struct gen8_cluster_registers *cluster = &clusters[i];

		slice = NUMBER_OF_SLICES(cluster->slice_region);
		cluster->offset = offset;

		for (j = 0; j < slice; j++) {
			const u32 *regs = cluster->regs;

			info.cluster = cluster;
			info.pipe_id = cluster->pipe_id;
			info.cluster_id = cluster->cluster_id;
			info.context_id = cluster->context_id;
			info.slice_id = SLICE_ID(cluster->slice_region, j);
			info.offset = offset;

			/* Build the crash script */
			ptr = gen8_capturescript->hostptr;

			ptr += CD_WRITE(ptr, GEN8_CP_APERTURE_CNTL_CD, GEN8_CP_APERTURE_REG_VAL
				(j, cluster->pipe_id, cluster->cluster_id, cluster->context_id));

			if (cluster->sel)
				ptr += CD_WRITE(ptr, cluster->sel->cd_reg, cluster->sel->val);

			for (; regs[0] != UINT_MAX; regs += 2) {
				count = REG_COUNT(regs);
				ptr += CD_READ(ptr, regs[0],
					count, (gen8_crashdump_registers->gpuaddr + offset));
				offset += count * sizeof(u32);
			}

			/* Marker for the end of the script */
			CD_FINISH(ptr, offset);

			func = gen8_legacy_snapshot_mvc;

			/* Try to run the crash dumper */
			if (_gen8_do_crashdump(device))
				func = gen8_snapshot_mvc;

			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_MVC_V3, snapshot, func, &info);
		}
	}
}
/* gen8_dbgc_debug_bus_read() - Read data from the trace bus */
static void gen8_dbgc_debug_bus_read(struct kgsl_device *device,
		u32 block_id, u32 index, u32 *val)
{
	u32 reg;

	reg = FIELD_PREP(GENMASK(7, 0), index) |
		FIELD_PREP(GENMASK(24, 16), block_id);

	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * A delay of 1 us is needed to allow enough time for the correct
	 * data to be funneled into the trace buffer.
	 */
	udelay(1);

	kgsl_regread(device, GEN8_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, GEN8_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/* gen8_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t gen8_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
			(struct kgsl_snapshot_debugbus *)buf;
	const u32 *block = priv;
	u32 i;
	u32 *data = (u32 *)(buf + sizeof(*header));

	if (remain < GEN8_DEBUGBUS_SECTION_SIZE) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = *block;
	header->count = GEN8_DEBUGBUS_BLOCK_SIZE * 2;

	for (i = 0; i < GEN8_DEBUGBUS_BLOCK_SIZE; i++)
		gen8_dbgc_debug_bus_read(device, *block, i, &data[i * 2]);

	return GEN8_DEBUGBUS_SECTION_SIZE;
}

static void gen8_dbgc_side_debug_bus_read(struct kgsl_device *device,
		u32 block_id, u32 index, u32 *val)
{
	u32 reg = FIELD_PREP(GENMASK(7, 0), index) |
		FIELD_PREP(GENMASK(24, 16), block_id);

	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * A delay of 1 us is needed to allow enough time for the correct
	 * data to be funneled into the trace buffer.
	 */
	udelay(1);

	reg = kgsl_regmap_read(&device->regmap, GEN8_DBGC_CFG_DBGBUS_OVER);
	*val = FIELD_GET(GENMASK(27, 24), reg);
}

static size_t gen8_snapshot_dbgc_side_debugbus_block(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_side_debugbus *header =
			(struct kgsl_snapshot_side_debugbus *)buf;
	const u32 *block = priv;
	int i;
	u32 *data = (u32 *)(buf + sizeof(*header));
	size_t size = (GEN8_DEBUGBUS_BLOCK_SIZE * sizeof(u32)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = *block;
	header->size = GEN8_DEBUGBUS_BLOCK_SIZE;
	header->valid_data = 0x4;

	for (i = 0; i < GEN8_DEBUGBUS_BLOCK_SIZE; i++)
		gen8_dbgc_side_debug_bus_read(device, *block, i, &data[i]);

	return size;
}
/* gen8_cx_debug_bus_read() - Read data from the CX trace bus */
static void gen8_cx_debug_bus_read(struct kgsl_device *device,
		u32 block_id, u32 index, u32 *val)
{
	u32 reg = FIELD_PREP(GENMASK(7, 0), index) |
		FIELD_PREP(GENMASK(24, 16), block_id);

	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * A delay of 1 us is needed to allow enough time for the correct
	 * data to be funneled into the trace buffer.
	 */
	udelay(1);

	kgsl_regread(device, GEN8_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
	val++;
	kgsl_regread(device, GEN8_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}

/*
 * gen8_snapshot_cx_dbgc_debugbus_block() - Capture debug data for a gpu
 * block from the CX DBGC block
 */
static size_t gen8_snapshot_cx_dbgc_debugbus_block(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header =
			(struct kgsl_snapshot_debugbus *)buf;
	const u32 *block = priv;
	int i;
	u32 *data = (u32 *)(buf + sizeof(*header));

	if (remain < GEN8_DEBUGBUS_SECTION_SIZE) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = *block;
	header->count = GEN8_DEBUGBUS_BLOCK_SIZE * 2;

	for (i = 0; i < GEN8_DEBUGBUS_BLOCK_SIZE; i++)
		gen8_cx_debug_bus_read(device, *block, i, &data[i * 2]);

	return GEN8_DEBUGBUS_SECTION_SIZE;
}

/* gen8_cx_side_debug_bus_read() - Read data from the CX side trace bus */
static void gen8_cx_side_debug_bus_read(struct kgsl_device *device,
		u32 block_id, u32 index, u32 *val)
{
	u32 reg = FIELD_PREP(GENMASK(7, 0), index) |
		FIELD_PREP(GENMASK(24, 16), block_id);

	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_SEL_D, reg);

	/*
	 * A delay of 1 us is needed to allow enough time for the correct
	 * data to be funneled into the trace buffer.
	 */
	udelay(1);

	kgsl_regread(device, GEN8_CX_DBGC_CFG_DBGBUS_OVER, &reg);
	*val = FIELD_GET(GENMASK(27, 24), reg);
}

/*
 * gen8_snapshot_cx_side_dbgc_debugbus_block() - Capture side debug data
 * for a gpu block from the CX DBGC block
 */
static size_t gen8_snapshot_cx_side_dbgc_debugbus_block(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_snapshot_side_debugbus *header =
			(struct kgsl_snapshot_side_debugbus *)buf;
	const u32 *block = priv;
	int i;
	u32 *data = (u32 *)(buf + sizeof(*header));
	size_t size = (GEN8_DEBUGBUS_BLOCK_SIZE * sizeof(u32)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	header->id = *block;
	header->size = GEN8_DEBUGBUS_BLOCK_SIZE;
	header->valid_data = 0x4;

	for (i = 0; i < GEN8_DEBUGBUS_BLOCK_SIZE; i++)
		gen8_cx_side_debug_bus_read(device, *block, i, &data[i]);

	return size;
}
static void gen8_snapshot_cx_debugbus(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	u32 i;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_CNTLT,
		FIELD_PREP(GENMASK(31, 28), 0xf));
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_CNTLM,
		FIELD_PREP(GENMASK(27, 24), 0xf));
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_BYTEL_0,
		FIELD_PREP(GENMASK(3, 0), 0x0) |
		FIELD_PREP(GENMASK(7, 4), 0x1) |
		FIELD_PREP(GENMASK(11, 8), 0x2) |
		FIELD_PREP(GENMASK(15, 12), 0x3) |
		FIELD_PREP(GENMASK(19, 16), 0x4) |
		FIELD_PREP(GENMASK(23, 20), 0x5) |
		FIELD_PREP(GENMASK(27, 24), 0x6) |
		FIELD_PREP(GENMASK(31, 28), 0x7));
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_BYTEL_1,
		FIELD_PREP(GENMASK(3, 0), 0x8) |
		FIELD_PREP(GENMASK(7, 4), 0x9) |
		FIELD_PREP(GENMASK(11, 8), 0xa) |
		FIELD_PREP(GENMASK(15, 12), 0xb) |
		FIELD_PREP(GENMASK(19, 16), 0xc) |
		FIELD_PREP(GENMASK(23, 20), 0xd) |
		FIELD_PREP(GENMASK(27, 24), 0xe) |
		FIELD_PREP(GENMASK(31, 28), 0xf));
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, GEN8_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);

	/* Dump the CX debugbus data if the block exists */
	if (!kgsl_regmap_valid_offset(&device->regmap, GEN8_CX_DBGC_CFG_DBGBUS_SEL_A))
		return;

	for (i = 0; i < gen8_snapshot_block_list->cx_debugbus_blocks_len; i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, gen8_snapshot_cx_dbgc_debugbus_block,
			(void *) &gen8_snapshot_block_list->cx_debugbus_blocks[i]);
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_SIDE_DEBUGBUS,
			snapshot, gen8_snapshot_cx_side_dbgc_debugbus_block,
			(void *) &gen8_snapshot_block_list->cx_debugbus_blocks[i]);
	}
}

/* gen8_snapshot_debugbus() - Capture debug bus data */
static void gen8_snapshot_debugbus(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	u32 i;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_CNTLT,
		FIELD_PREP(GENMASK(31, 28), 0xf));
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_CNTLM,
		FIELD_PREP(GENMASK(27, 24), 0xf));
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_IVTL_0, 0);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_IVTL_1, 0);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_IVTL_2, 0);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_IVTL_3, 0);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_BYTEL_0,
		FIELD_PREP(GENMASK(3, 0), 0x0) |
		FIELD_PREP(GENMASK(7, 4), 0x1) |
		FIELD_PREP(GENMASK(11, 8), 0x2) |
		FIELD_PREP(GENMASK(15, 12), 0x3) |
		FIELD_PREP(GENMASK(19, 16), 0x4) |
		FIELD_PREP(GENMASK(23, 20), 0x5) |
		FIELD_PREP(GENMASK(27, 24), 0x6) |
		FIELD_PREP(GENMASK(31, 28), 0x7));
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_BYTEL_1,
		FIELD_PREP(GENMASK(3, 0), 0x8) |
		FIELD_PREP(GENMASK(7, 4), 0x9) |
		FIELD_PREP(GENMASK(11, 8), 0xa) |
		FIELD_PREP(GENMASK(15, 12), 0xb) |
		FIELD_PREP(GENMASK(19, 16), 0xc) |
		FIELD_PREP(GENMASK(23, 20), 0xd) |
		FIELD_PREP(GENMASK(27, 24), 0xe) |
		FIELD_PREP(GENMASK(31, 28), 0xf));
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_MASKL_0, 0);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_MASKL_1, 0);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_MASKL_2, 0);
	kgsl_regwrite(device, GEN8_DBGC_CFG_DBGBUS_MASKL_3, 0);

	for (i = 0; i < gen8_snapshot_block_list->debugbus_blocks_len; i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, gen8_snapshot_dbgc_debugbus_block,
			(void *) &gen8_snapshot_block_list->debugbus_blocks[i]);
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_SIDE_DEBUGBUS,
			snapshot, gen8_snapshot_dbgc_side_debugbus_block,
			(void *) &gen8_snapshot_block_list->debugbus_blocks[i]);
	}

	for (i = 0; i < gen8_snapshot_block_list->gbif_debugbus_blocks_len; i++) {
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
			snapshot, gen8_snapshot_dbgc_debugbus_block,
			(void *) &gen8_snapshot_block_list->gbif_debugbus_blocks[i]);
		kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_SIDE_DEBUGBUS,
			snapshot, gen8_snapshot_dbgc_side_debugbus_block,
			(void *) &gen8_snapshot_block_list->gbif_debugbus_blocks[i]);
	}
}
  1175. /* gen8_snapshot_sqe() - Dump SQE data in snapshot */
  1176. static size_t gen8_snapshot_sqe(struct kgsl_device *device, u8 *buf,
  1177. size_t remain, void *priv)
  1178. {
  1179. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1180. struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
  1181. u32 *data = (u32 *)(buf + sizeof(*header));
  1182. struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
  1183. if (remain < DEBUG_SECTION_SZ(GEN8_SQE_FW_SNAPSHOT_DWORDS)) {
  1184. SNAPSHOT_ERR_NOMEM(device, "SQE VERSION DEBUG");
  1185. return 0;
  1186. }
  1187. /* Dump the SQE firmware version */
  1188. header->type = SNAPSHOT_DEBUG_SQE_VERSION;
  1189. header->size = GEN8_SQE_FW_SNAPSHOT_DWORDS;
  1190. memcpy(data, fw->memdesc->hostptr, (GEN8_SQE_FW_SNAPSHOT_DWORDS * sizeof(u32)));
  1191. return DEBUG_SECTION_SZ(GEN8_SQE_FW_SNAPSHOT_DWORDS);
  1192. }

/* gen8_snapshot_aqe() - Dump AQE data in snapshot */
static size_t gen8_snapshot_aqe(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
	u32 *data = (u32 *)(buf + sizeof(*header));
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_AQE);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_AQE))
		return 0;

	if (remain < DEBUG_SECTION_SZ(1)) {
		SNAPSHOT_ERR_NOMEM(device, "AQE VERSION DEBUG");
		return 0;
	}

	/* Dump the AQE firmware version */
	header->type = SNAPSHOT_DEBUG_AQE_VERSION;
	header->size = 1;
	*data = fw->version;

	return DEBUG_SECTION_SZ(1);
}

/* Snapshot the preemption related buffers */
static size_t snapshot_preemption_record(struct kgsl_device *device,
		u8 *buf, size_t remain, void *priv)
{
	struct kgsl_memdesc *memdesc = priv;
	struct kgsl_snapshot_gpu_object_v2 *header =
		(struct kgsl_snapshot_gpu_object_v2 *)buf;
	u8 *ptr = buf + sizeof(*header);
	u64 ctxt_record_size = max_t(u64, GEN8_SNAPSHOT_CTXRECORD_SIZE_IN_BYTES,
			device->snapshot_ctxt_record_size);

	if (remain < (ctxt_record_size + sizeof(*header))) {
		SNAPSHOT_ERR_NOMEM(device, "PREEMPTION RECORD");
		return 0;
	}

	header->size = ctxt_record_size >> 2;
	header->gpuaddr = memdesc->gpuaddr;
	header->ptbase =
		kgsl_mmu_pagetable_get_ttbr0(device->mmu.defaultpagetable);
	header->type = SNAPSHOT_GPU_OBJECT_GLOBAL;

	memcpy(ptr, memdesc->hostptr, ctxt_record_size);

	return ctxt_record_size + sizeof(*header);
}
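
/*
 * gen8_reglist_snapshot() - Dump the GEN8 register list blocks
 * @device: Pointer to the KGSL device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * For every register list entry, and for every slice of that entry, build a
 * crash dumper script that selects the slice aperture, programs the optional
 * select register and reads each register range into the crash dumper
 * buffer. If the script check fails, or the crash dumper does not run, the
 * section falls back to legacy (direct) register reads.
 */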
static void gen8_reglist_snapshot(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	u64 *ptr, offset = 0;
	u32 i, j, r, slices;
	struct gen8_reg_list *reg_list = gen8_snapshot_block_list->reg_list;
	size_t (*func)(struct kgsl_device *device, u8 *buf, size_t remain,
		void *priv) = gen8_legacy_snapshot_registers;
	struct gen8_reg_list_info info = {0};

	if (CD_SCRIPT_CHECK(device)) {
		for (i = 0; reg_list[i].regs; i++) {
			struct gen8_reg_list *regs = &reg_list[i];

			slices = NUMBER_OF_SLICES(regs->slice_region);
			for (j = 0; j < slices; j++) {
				info.regs = regs;
				info.slice_id = SLICE_ID(regs->slice_region, j);
				kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_MVC_V3,
					snapshot, func, &info);
			}
		}
		return;
	}

	for (i = 0; reg_list[i].regs; i++) {
		struct gen8_reg_list *regs = &reg_list[i];

		slices = NUMBER_OF_SLICES(regs->slice_region);
		regs->offset = offset;

		for (j = 0; j < slices; j++) {
			const u32 *regs_ptr = regs->regs;

			/* Build the crash script */
			ptr = gen8_capturescript->hostptr;
			ptr += CD_WRITE(ptr, GEN8_CP_APERTURE_CNTL_CD,
					GEN8_CP_APERTURE_REG_VAL(j, 0, 0, 0));

			/* Program the SEL_CNTL_CD register appropriately */
			if (regs->sel)
				ptr += CD_WRITE(ptr, regs->sel->cd_reg, regs->sel->val);

			info.regs = regs;
			info.slice_id = SLICE_ID(regs->slice_region, j);
			info.offset = offset;

			for (; regs_ptr[0] != UINT_MAX; regs_ptr += 2) {
				r = REG_COUNT(regs_ptr);
				ptr += CD_READ(ptr, regs_ptr[0], r,
					(gen8_crashdump_registers->gpuaddr + offset));
				offset += r * sizeof(u32);
			}

			/* Marker for end of script */
			CD_FINISH(ptr, offset);

			func = gen8_legacy_snapshot_registers;

			/* Try to run the crash dumper */
			if (_gen8_do_crashdump(device))
				func = gen8_snapshot_registers;

			kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_MVC_V3,
				snapshot, func, &info);
		}
	}
}
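
/*
 * gen8_snapshot_cx_misc_registers() - Write out crash dumper captured
 * CX_MISC register values
 * @device: Pointer to the KGSL device being snapshotted
 * @buf: Destination buffer for the snapshot section
 * @remain: Number of bytes left in the snapshot buffer
 * @priv: Pointer to the UINT_MAX terminated list of register ranges
 *
 * Copies the values that the crash dumper deposited in
 * gen8_crashdump_registers into a REGS_V2 section, prefixing each range
 * with an address/count header.
 */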
static size_t gen8_snapshot_cx_misc_registers(struct kgsl_device *device, u8 *buf,
		size_t remain, void *priv)
{
	const u32 *ptr = (const u32 *)priv;
	u32 *src, *data = (u32 *)buf;
	size_t size = adreno_snapshot_regs_count(ptr) * sizeof(u32);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "CX_MISC REGISTERS");
		return 0;
	}

	src = gen8_crashdump_registers->hostptr;

	for (; ptr[0] != UINT_MAX; ptr += 2) {
		u32 cnt = REG_COUNT(ptr);

		/* A single register is flagged by setting BIT(31) in its address */
		if (cnt == 1) {
			*data++ = BIT(31) | ptr[0];
		} else {
			*data++ = ptr[0];
			*data++ = cnt;
		}
		memcpy(data, src, cnt << 2);
		data += cnt;
		src += cnt;
	}

	/* Return the size of the section */
	return size;
}
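
/*
 * gen8_cx_misc_regs_snapshot() - Snapshot the CX_MISC register block
 * @device: Pointer to the KGSL device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * Prefer the crash dumper: build a script that reads every CX_MISC range
 * and, if the dump runs, add the section from the captured data. Fall back
 * to direct reads when the script check fails, the RPMh power state is not
 * active or GX is off.
 */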
static void gen8_cx_misc_regs_snapshot(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	u64 *ptr, offset = 0;
	const u32 *regs_ptr = (const u32 *)gen8_snapshot_block_list->cx_misc_regs;

	if (CD_SCRIPT_CHECK(device) || !gen8_gmu_rpmh_pwr_state_is_active(device)
		|| !gen8_gmu_gx_is_on(ADRENO_DEVICE(device)))
		goto legacy_snapshot;

	/* Build the crash script */
	ptr = (u64 *)gen8_capturescript->hostptr;
	for (; regs_ptr[0] != UINT_MAX; regs_ptr += 2) {
		u32 r = REG_COUNT(regs_ptr);

		ptr += CD_READ(ptr, regs_ptr[0], r,
			(gen8_crashdump_registers->gpuaddr + offset));
		offset += r * sizeof(u32);
	}

	/* Marker for end of script */
	CD_FINISH(ptr, offset);

	/* Try to run the crash dumper */
	if (_gen8_do_crashdump(device)) {
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS_V2,
			snapshot, gen8_snapshot_cx_misc_registers,
			(void *)gen8_snapshot_block_list->cx_misc_regs);
		return;
	}

legacy_snapshot:
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS_V2,
		snapshot, adreno_snapshot_cx_misc_registers,
		(void *)gen8_snapshot_block_list->cx_misc_regs);
}
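
/*
 * gen8_snapshot_external_core_regs() - Dump external core registers
 * @device: Pointer to the KGSL device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * Adds one REGS_V2 section for each external core register list published
 * by the target's snapshot block list.
 */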
void gen8_snapshot_external_core_regs(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot)
{
	const u32 **external_core_regs;
	u32 i, num_external_core_regs;
	const struct adreno_gen8_core *gpucore = to_gen8_core(ADRENO_DEVICE(device));

	gen8_snapshot_block_list = gpucore->gen8_snapshot_block_list;
	external_core_regs = gen8_snapshot_block_list->external_core_regs;
	num_external_core_regs = gen8_snapshot_block_list->num_external_core_regs;

	for (i = 0; i < num_external_core_regs; i++)
		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS_V2,
			snapshot, adreno_snapshot_registers_v2,
			(void *) external_core_regs[i]);
}

/*
 * gen8_snapshot() - GEN8 GPU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * Collects all of the GEN8-specific state (registers, debugbus data,
 * firmware versions, shader memory and preemption records) into the
 * snapshot memory.
 */
void gen8_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	u32 i;
	const struct adreno_gen8_core *gpucore = to_gen8_core(ADRENO_DEVICE(device));
	int is_current_rt;

	gen8_crashdump_timedout = false;
	gen8_snapshot_block_list = gpucore->gen8_snapshot_block_list;

	/* External registers are dumped in the beginning of gmu snapshot */
	if (!gmu_core_isenabled(device))
		gen8_snapshot_external_core_regs(device, snapshot);

	gen8_cx_misc_regs_snapshot(device, snapshot);

	gen8_snapshot_cx_debugbus(adreno_dev, snapshot);

	if (!gen8_gmu_rpmh_pwr_state_is_active(device) ||
		!gen8_gmu_gx_is_on(adreno_dev))
		return;

	/* SQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, gen8_snapshot_sqe, NULL);

	/* AQE Firmware */
	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
		snapshot, gen8_snapshot_aqe, NULL);

	gen8_snapshot_trace_buffer(device, snapshot);

	gen8_snapshot_debugbus(adreno_dev, snapshot);

	/* Demote a real-time caller to normal scheduling while dumping */
	is_current_rt = rt_task(current);
	if (is_current_rt)
		sched_set_normal(current, 0);

	gen8_regread64_aperture(device, GEN8_CP_IB1_BASE_LO_PIPE,
		GEN8_CP_IB1_BASE_HI_PIPE, &snapshot->ib1base, PIPE_BR, 0, 0);
	gen8_regread64_aperture(device, GEN8_CP_IB2_BASE_LO_PIPE,
		GEN8_CP_IB2_BASE_HI_PIPE, &snapshot->ib2base, PIPE_BR, 0, 0);
	gen8_regread_aperture(device, GEN8_CP_IB1_REM_SIZE_PIPE,
		&snapshot->ib1size, PIPE_BR, 0, 0);
	gen8_regread_aperture(device, GEN8_CP_IB2_REM_SIZE_PIPE,
		&snapshot->ib2size, PIPE_BR, 0, 0);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_LPAC)) {
		gen8_regread64_aperture(device, GEN8_CP_IB1_BASE_LO_PIPE,
			GEN8_CP_IB1_BASE_HI_PIPE, &snapshot->ib1base_lpac, PIPE_LPAC, 0, 0);
		gen8_regread64_aperture(device, GEN8_CP_IB2_BASE_LO_PIPE,
			GEN8_CP_IB2_BASE_HI_PIPE, &snapshot->ib2base_lpac, PIPE_LPAC, 0, 0);
		gen8_regread_aperture(device, GEN8_CP_IB1_REM_SIZE_PIPE,
			&snapshot->ib1size_lpac, PIPE_LPAC, 0, 0);
		gen8_regread_aperture(device, GEN8_CP_IB2_REM_SIZE_PIPE,
			&snapshot->ib2size_lpac, PIPE_LPAC, 0, 0);
	}

	/* Clear aperture register */
	gen8_host_aperture_set(adreno_dev, 0, 0, 0);

	/* Assert the isStatic bit before triggering snapshot */
	kgsl_regwrite(device, GEN8_RBBM_SNAPSHOT_STATUS, 0x1);

	/* Dump the registers which get affected by crash dumper trigger */
	for (i = 0; i < gen8_snapshot_block_list->num_pre_crashdumper_regs; i++) {
		struct gen8_reg_list *regs = &gen8_snapshot_block_list->pre_crashdumper_regs[i];
		struct gen8_reg_list_info info = {0};
		u32 j, slices;

		slices = NUMBER_OF_SLICES(regs->slice_region);
		for (j = 0; j < slices; j++) {
			info.regs = regs;
			info.slice_id = SLICE_ID(regs->slice_region, j);
			kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_MVC_V3,
				snapshot, gen8_legacy_snapshot_registers, &info);
		}
	}

	gen8_reglist_snapshot(device, snapshot);

	for (i = 0; i < gen8_snapshot_block_list->index_registers_len; i++) {
		kgsl_regwrite(device, GEN8_CP_APERTURE_CNTL_HOST,
			GEN8_CP_APERTURE_REG_VAL(0,
				gen8_snapshot_block_list->index_registers[i].pipe_id, 0, 0));
		kgsl_snapshot_indexed_registers_v2(device, snapshot,
			gen8_snapshot_block_list->index_registers[i].addr,
			gen8_snapshot_block_list->index_registers[i].data, 0,
			gen8_snapshot_block_list->index_registers[i].size,
			gen8_snapshot_block_list->index_registers[i].pipe_id, UINT_MAX);
	}

	/* Mempool debug data */
	gen8_snapshot_mempool(device, snapshot);

	/* CP MVC register section */
	gen8_snapshot_mvc_regs(device, snapshot,
		gen8_snapshot_block_list->cp_clusters, gen8_snapshot_block_list->num_cp_clusters);

	/* MVC register section */
	gen8_snapshot_mvc_regs(device, snapshot,
		gen8_snapshot_block_list->clusters, gen8_snapshot_block_list->num_clusters);

	/* Registers dumped through DBG AHB */
	gen8_snapshot_dbgahb_regs(device, snapshot);

	/* Shader memory */
	gen8_snapshot_shader(device, snapshot);

	kgsl_regwrite(device, GEN8_RBBM_SNAPSHOT_STATUS, 0x0);

	/* Preemption record */
	if (adreno_is_preemption_enabled(adreno_dev)) {
		FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
			kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
				snapshot, snapshot_preemption_record,
				rb->preemption_desc);
		}
	}

	/* Move a real-time caller back to the RT class */
	if (is_current_rt)
		sched_set_fifo(current);
}
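
/*
 * gen8_crashdump_init() - Allocate the global crash dumper buffers
 * @adreno_dev: Pointer to the Adreno device
 *
 * Allocates the GPU read-only capture script and the buffer the crash
 * dumper writes register values into. An allocation failure here is logged
 * but not fatal; the snapshot paths above fall back to legacy register
 * reads when the crash dumper is unavailable.
 */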
void gen8_crashdump_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	ret = adreno_allocate_global(device, &gen8_capturescript,
		50 * PAGE_SIZE, 0, KGSL_MEMFLAGS_GPUREADONLY,
		KGSL_MEMDESC_PRIVILEGED, "capturescript");
	if (!ret)
		ret = adreno_allocate_global(device, &gen8_crashdump_registers,
			200 * PAGE_SIZE, 0, 0,
			KGSL_MEMDESC_PRIVILEGED, "capturescript_regs");

	if (ret)
		dev_err(device->dev, "Failed to init crashdumper err = %d\n", ret);
}