kgsl_snapshot.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of.h>
#include <linux/panic_notifier.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include "adreno_cp_parser.h"
#include "kgsl_device.h"
#include "kgsl_eventlog.h"
#include "kgsl_sharedmem.h"
#include "kgsl_snapshot.h"
#include "kgsl_util.h"

static void kgsl_snapshot_save_frozen_objs(struct work_struct *work);

/* Placeholder for list of ib objects that contain all objects in that IB */
struct kgsl_snapshot_cp_obj {
        struct adreno_ib_object_list *ib_obj_list;
        struct list_head node;
};

struct snapshot_obj_itr {
        u8 *buf;        /* Buffer pointer to write to */
        int pos;        /* Current position in the sequence */
        loff_t offset;  /* File offset to start writing from */
        size_t remain;  /* Bytes remaining in buffer */
        size_t write;   /* Bytes written so far */
};
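
/*
 * The iterator treats the snapshot image as one logical byte stream and
 * copies out only the slice that falls inside [offset, offset + remain);
 * snapshot_show() below uses it to service partial sysfs reads at
 * arbitrary file offsets.
 */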

static inline u64 snapshot_phy_addr(struct kgsl_device *device)
{
        return device->snapshot_memory.dma_handle ?
                device->snapshot_memory.dma_handle : __pa(device->snapshot_memory.ptr);
}

static inline u64 atomic_snapshot_phy_addr(struct kgsl_device *device)
{
        return device->snapshot_memory_atomic.ptr == device->snapshot_memory.ptr ?
                snapshot_phy_addr(device) : __pa(device->snapshot_memory_atomic.ptr);
}
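
/*
 * When the snapshot buffer came from dma_alloc_coherent(), the saved
 * dma_handle is the physical address to report; the __pa() fallback covers
 * the devm_kzalloc() path taken when the large allocation failed. The
 * atomic snapshot may alias the normal buffer, in which case both helpers
 * report the same address.
 */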

static void obj_itr_init(struct snapshot_obj_itr *itr, u8 *buf,
                loff_t offset, size_t remain)
{
        itr->buf = buf;
        itr->offset = offset;
        itr->remain = remain;
        itr->pos = 0;
        itr->write = 0;
}

static int obj_itr_out(struct snapshot_obj_itr *itr, void *src, int size)
{
        if (itr->remain == 0)
                return 0;

        if ((itr->pos + size) <= itr->offset)
                goto done;

        /* Handle the case that offset is in the middle of the buffer */
        if (itr->offset > itr->pos) {
                src += (itr->offset - itr->pos);
                size -= (itr->offset - itr->pos);

                /* Advance pos to the offset start */
                itr->pos = itr->offset;
        }

        if (size > itr->remain)
                size = itr->remain;

        memcpy(itr->buf, src, size);

        itr->buf += size;
        itr->write += size;
        itr->remain -= size;

done:
        itr->pos += size;
        return size;
}
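
/*
 * Minimal sketch of how snapshot_show() drives the iterator for a single
 * sysfs read (off and count are supplied by the VFS):
 *
 *      obj_itr_init(&itr, buf, off, count);
 *      obj_itr_out(&itr, snapshot->start, snapshot->size);
 *      obj_itr_out(&itr, snapshot->mempool, snapshot->mempool_size);
 *
 * itr.write ends up as the number of bytes copied for this read; pos still
 * advances past skipped sources so later chunks stay aligned with the file
 * offset.
 */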

static void kgsl_snapshot_put_object(struct kgsl_snapshot_object *obj)
{
        list_del(&obj->node);

        obj->entry->memdesc.priv &= ~KGSL_MEMDESC_FROZEN;
        obj->entry->memdesc.priv &= ~KGSL_MEMDESC_SKIP_RECLAIM;

        kgsl_mem_entry_put(obj->entry);
        kfree(obj);
}

/**
 * kgsl_snapshot_have_object() - return 1 if the object has been processed
 * @snapshot: the snapshot data
 * @process: The process that owns the object to freeze
 * @gpuaddr: The gpu address of the object to freeze
 * @size: the size of the object (may not always be the size of the region)
 *
 * Return 1 if the object is already in the list - this can save us from
 * having to parse the same thing over again. There are two lists tracking
 * objects, so check for the object in both.
 */
int kgsl_snapshot_have_object(struct kgsl_snapshot *snapshot,
                struct kgsl_process_private *process,
                uint64_t gpuaddr, uint64_t size)
{
        struct kgsl_snapshot_object *obj;
        struct kgsl_snapshot_cp_obj *obj_cp;
        struct adreno_ib_object *ib_obj;
        int i;

        /* Check whether the object is already tracked in the ib list */
        list_for_each_entry(obj_cp, &snapshot->cp_list, node) {
                if (obj_cp->ib_obj_list == NULL
                                || obj_cp->ib_obj_list->num_objs == 0)
                        continue;

                ib_obj = &(obj_cp->ib_obj_list->obj_list[0]);
                if (ib_obj->entry == NULL || ib_obj->entry->priv != process)
                        continue;

                for (i = 0; i < obj_cp->ib_obj_list->num_objs; i++) {
                        ib_obj = &(obj_cp->ib_obj_list->obj_list[i]);
                        if ((gpuaddr >= ib_obj->gpuaddr) &&
                                ((gpuaddr + size) <=
                                (ib_obj->gpuaddr + ib_obj->size)))
                                return 1;
                }
        }

        list_for_each_entry(obj, &snapshot->obj_list, node) {
                if (obj->entry == NULL || obj->entry->priv != process)
                        continue;

                if ((gpuaddr >= obj->gpuaddr) &&
                        ((gpuaddr + size) <= (obj->gpuaddr + obj->size)))
                        return 1;
        }

        return 0;
}

/**
 * kgsl_snapshot_get_object() - Mark a GPU buffer to be frozen
 * @snapshot: The snapshot data
 * @process: The process that owns the object we want to freeze
 * @gpuaddr: The gpu address of the object to freeze
 * @size: the size of the object (may not always be the size of the region)
 * @type: the type of object being saved (shader, vbo, etc)
 *
 * Mark and freeze a GPU buffer object. This will prevent it from being
 * freed until it can be copied out as part of the snapshot dump. Returns the
 * size of the object being frozen.
 */
int kgsl_snapshot_get_object(struct kgsl_snapshot *snapshot,
                struct kgsl_process_private *process, uint64_t gpuaddr,
                uint64_t size, u32 type)
{
        struct kgsl_mem_entry *entry;
        struct kgsl_snapshot_object *obj;
        uint64_t offset;
        int ret = -EINVAL;
        u32 mem_type;

        if (!gpuaddr)
                return 0;

        entry = kgsl_sharedmem_find(process, gpuaddr);
        if (entry == NULL)
                return -EINVAL;

        /* We can't freeze external memory, because we don't own it */
        if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_MASK)
                goto err_put;

        /*
         * Do not save texture and render targets in snapshot,
         * they can be just too big
         */
        mem_type = kgsl_memdesc_get_memtype(&entry->memdesc);
        if (mem_type == KGSL_MEMTYPE_TEXTURE ||
                        mem_type == KGSL_MEMTYPE_EGL_SURFACE ||
                        mem_type == KGSL_MEMTYPE_EGL_IMAGE) {
                ret = 0;
                goto err_put;
        }

        /*
         * size indicates the number of bytes in the region to save. This might
         * not always be the entire size of the region because some buffers are
         * sub-allocated from a larger region. However, if size 0 was passed,
         * that's a flag that the caller wants to capture the entire buffer
         */
        if (size == 0) {
                size = entry->memdesc.size;
                offset = 0;

                /* Adjust the gpuaddr to the start of the object */
                gpuaddr = entry->memdesc.gpuaddr;
        } else {
                offset = gpuaddr - entry->memdesc.gpuaddr;
        }

        if (size + offset > entry->memdesc.size) {
                dev_err(snapshot->device->dev,
                        "snapshot: invalid size for GPU buffer 0x%016llx\n",
                        gpuaddr);
                goto err_put;
        }

        /* If the buffer is already on the list, skip it */
        list_for_each_entry(obj, &snapshot->obj_list, node) {
                /* Combine the range with the existing object if they overlap */
                if (obj->entry->priv == process && obj->type == type &&
                        kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
                                gpuaddr, size)) {
                        uint64_t end1 = obj->gpuaddr + obj->size;
                        uint64_t end2 = gpuaddr + size;

                        if (obj->gpuaddr > gpuaddr)
                                obj->gpuaddr = gpuaddr;

                        if (end1 > end2)
                                obj->size = end1 - obj->gpuaddr;
                        else
                                obj->size = end2 - obj->gpuaddr;

                        obj->offset = obj->gpuaddr - entry->memdesc.gpuaddr;
                        ret = 0;
                        goto err_put;
                }
        }

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL)
                goto err_put;

        obj->type = type;
        obj->entry = entry;
        obj->gpuaddr = gpuaddr;
        obj->size = size;
        obj->offset = offset;

        list_add(&obj->node, &snapshot->obj_list);

        /*
         * Return the size of the entire mem entry that was frozen - this gets
         * used for tracking how much memory is frozen for a hang. Also, mark
         * the memory entry as frozen. If the entry was already marked as
         * frozen, then another buffer already got to it. In that case, return
         * 0 so it doesn't get counted twice.
         */
        ret = (entry->memdesc.priv & KGSL_MEMDESC_FROZEN) ? 0
                : entry->memdesc.size;

        entry->memdesc.priv |= KGSL_MEMDESC_FROZEN;

        return ret;

err_put:
        entry->memdesc.priv &= ~KGSL_MEMDESC_SKIP_RECLAIM;
        kgsl_mem_entry_put(entry);
        return ret;
}
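
/*
 * Worked example of the overlap merge above (addresses hypothetical): if a
 * 0x1000-byte object at 0x8000 is already listed and a caller freezes
 * 0x1000 bytes at 0x8800, the existing node grows to cover 0x8000-0x9800
 * instead of adding a second node, so each range of a mem entry is
 * represented once per type.
 */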

/**
 * kgsl_snapshot_dump_registers - helper function to dump device registers
 * @device: the device to dump registers from
 * @buf: pointer to the start of the section in the snapshot region
 * @remain: number of bytes remaining in the snapshot region
 * @priv: pointer to the kgsl_snapshot_registers data
 *
 * Given an array of register range pairs (start, end [inclusive]), dump the
 * registers into a snapshot register section. The snapshot region stores a
 * pair of dwords for each register - the word address of the register, and
 * the value.
 */
size_t kgsl_snapshot_dump_registers(struct kgsl_device *device, u8 *buf,
                size_t remain, void *priv)
{
        struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
        struct kgsl_snapshot_registers *regs = priv;
        u32 *data = (u32 *)(buf + sizeof(*header));
        int count = 0, j, k;

        /* Figure out how many registers we are going to dump */
        for (j = 0; j < regs->count; j++) {
                int start = regs->regs[j * 2];
                int end = regs->regs[j * 2 + 1];

                count += (end - start + 1);
        }

        if (remain < (count * 8) + sizeof(*header)) {
                SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
                return 0;
        }

        for (j = 0; j < regs->count; j++) {
                u32 start = regs->regs[j * 2];
                u32 end = regs->regs[j * 2 + 1];

                for (k = start; k <= end; k++) {
                        u32 val;

                        kgsl_regread(device, k, &val);
                        *data++ = k;
                        *data++ = val;
                }
        }

        header->count = count;

        /* Return the size of the section */
        return (count * 8) + sizeof(*header);
}
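
/*
 * Minimal caller sketch (register offsets hypothetical): each pair in the
 * array is an inclusive (start, end) range and count is the number of
 * pairs, so this would dump registers 0x0-0x3 and 0x400-0x43f:
 *
 *      static const u32 ranges[] = { 0x0, 0x3, 0x400, 0x43f };
 *      struct kgsl_snapshot_registers regs = {
 *              .regs = ranges,
 *              .count = ARRAY_SIZE(ranges) / 2,
 *      };
 *
 *      kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
 *              snapshot, kgsl_snapshot_dump_registers, &regs);
 */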

struct kgsl_snapshot_indexed_registers {
        u32 index;
        u32 data;
        u32 start;
        u32 count;
};

static size_t kgsl_snapshot_dump_indexed_regs(struct kgsl_device *device,
                u8 *buf, size_t remain, void *priv)
{
        struct kgsl_snapshot_indexed_registers *iregs = priv;
        struct kgsl_snapshot_indexed_regs *header =
                (struct kgsl_snapshot_indexed_regs *)buf;
        u32 *data = (u32 *)(buf + sizeof(*header));

        if (remain < (iregs->count * 4) + sizeof(*header)) {
                SNAPSHOT_ERR_NOMEM(device, "INDEXED REGS");
                return 0;
        }

        header->index_reg = iregs->index;
        header->data_reg = iregs->data;
        header->count = iregs->count;
        header->start = iregs->start;

        kgsl_regmap_read_indexed_interleaved(&device->regmap, iregs->index,
                iregs->data, data, iregs->start, iregs->count);

        return (iregs->count * 4) + sizeof(*header);
}

/**
 * kgsl_snapshot_indexed_registers - Add a set of indexed registers to the
 * snapshot
 * @device: Pointer to the KGSL device being snapshotted
 * @snapshot: Snapshot instance
 * @index: Offset for the index register
 * @data: Offset for the data register
 * @start: Index to start reading
 * @count: Number of entries to read
 *
 * Dump the values from an indexed register group into the snapshot
 */
void kgsl_snapshot_indexed_registers(struct kgsl_device *device,
                struct kgsl_snapshot *snapshot,
                u32 index, u32 data, u32 start, u32 count)
{
        struct kgsl_snapshot_indexed_registers iregs;

        iregs.index = index;
        iregs.data = data;
        iregs.start = start;
        iregs.count = count;

        kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_INDEXED_REGS,
                snapshot, kgsl_snapshot_dump_indexed_regs, &iregs);
}

struct kgsl_snapshot_indexed_registers_v2 {
        u32 index;
        u32 data;
        u32 start;
        u32 count;
        u32 pipe_id;
        u32 slice_id;
};

static size_t kgsl_snapshot_dump_indexed_regs_v2(struct kgsl_device *device,
                u8 *buf, size_t remain, void *priv)
{
        struct kgsl_snapshot_indexed_registers_v2 *iregs = priv;
        struct kgsl_snapshot_indexed_regs_v2 *header =
                (struct kgsl_snapshot_indexed_regs_v2 *)buf;
        u32 *data = (u32 *)(buf + sizeof(*header));

        if (remain < ((iregs->count * 4) + sizeof(*header))) {
                SNAPSHOT_ERR_NOMEM(device, "INDEXED REGS");
                return 0;
        }

        header->index_reg = iregs->index;
        header->data_reg = iregs->data;
        header->count = iregs->count;
        header->start = iregs->start;
        header->pipe_id = iregs->pipe_id;
        header->slice_id = iregs->slice_id;

        kgsl_regmap_read_indexed_interleaved(&device->regmap, iregs->index,
                iregs->data, data, iregs->start, iregs->count);

        return (iregs->count * 4) + sizeof(*header);
}

void kgsl_snapshot_indexed_registers_v2(struct kgsl_device *device,
                struct kgsl_snapshot *snapshot,
                u32 index, u32 data, u32 start, u32 count,
                u32 pipe_id, u32 slice_id)
{
        struct kgsl_snapshot_indexed_registers_v2 iregs;

        iregs.index = index;
        iregs.data = data;
        iregs.start = start;
        iregs.count = count;
        iregs.pipe_id = pipe_id;
        iregs.slice_id = slice_id;

        kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_INDEXED_REGS_V2,
                snapshot, kgsl_snapshot_dump_indexed_regs_v2, &iregs);
}

/**
 * kgsl_snapshot_add_section() - Add a new section to the GPU snapshot
 * @device: the KGSL device being snapshotted
 * @id: the section id
 * @snapshot: pointer to the snapshot instance
 * @func: Function pointer to fill the section
 * @priv: Private pointer to pass to the function
 *
 * Set up a KGSL snapshot section by letting the callback function fill the
 * section body, then adding the standard section header.
 */
void kgsl_snapshot_add_section(struct kgsl_device *device, u16 id,
                struct kgsl_snapshot *snapshot,
                size_t (*func)(struct kgsl_device *, u8 *, size_t, void *),
                void *priv)
{
        struct kgsl_snapshot_section_header *header =
                (struct kgsl_snapshot_section_header *)snapshot->ptr;
        u8 *data = snapshot->ptr + sizeof(*header);
        size_t ret = 0;

        /*
         * Sanity check to make sure there is enough for the header. The
         * callback will check to make sure there is enough for the rest
         * of the data. If there isn't enough room then don't advance the
         * pointer.
         */
        if (snapshot->remain < sizeof(*header))
                return;

        /* It is legal to have no function (i.e. - make an empty section) */
        if (func) {
                ret = func(device, data, snapshot->remain - sizeof(*header),
                        priv);

                /*
                 * If there wasn't enough room for the data then don't bother
                 * setting up the header.
                 */
                if (ret == 0)
                        return;
        }

        header->magic = SNAPSHOT_SECTION_MAGIC;
        header->id = id;
        header->size = ret + sizeof(*header);

        snapshot->ptr += header->size;
        snapshot->remain -= header->size;
        snapshot->size += header->size;
}
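
/*
 * The resulting image is a flat sequence of sections, each led by a
 * kgsl_snapshot_section_header (magic, id, size) and terminated by the
 * KGSL_SNAPSHOT_SECTION_END header that snapshot_show() appends when the
 * image is read out. A callback returning 0 means "not enough room", and
 * the section is dropped without advancing the write pointer.
 */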

static void kgsl_free_snapshot(struct kgsl_snapshot *snapshot)
{
        struct kgsl_snapshot_object *obj, *tmp;
        struct kgsl_device *device = snapshot->device;

        wait_for_completion(&snapshot->dump_gate);

        list_for_each_entry_safe(obj, tmp, &snapshot->obj_list, node)
                kgsl_snapshot_put_object(obj);

        if (snapshot->mempool)
                vfree(snapshot->mempool);

        kfree(snapshot);
        dev_err(device->dev, "snapshot: objects released\n");
}

#define SP0_ISDB_ISDB_BRKPT_CFG 0x40014
#define SP0_ISDB_ISDB_EN 0x40004
#define SP0_ISDB_ISDB_CMD 0x4000C

void isdb_write(void __iomem *base, u32 offset)
{
        /* To set the SCHBREAKTYPE bit */
        __raw_writel(0x801, base + SP0_ISDB_ISDB_BRKPT_CFG + offset);

        /*
         * Ensure the configuration is set before
         * enabling ISDB
         */
        wmb();

        /* To set the ISDBCLKON and ISDB_EN bits */
        __raw_writel(0x03, base + SP0_ISDB_ISDB_EN + offset);

        /*
         * Ensure the previous write to enable ISDB posts
         * before issuing the break command
         */
        wmb();

        /* To issue ISDB_0_ISDB_CMD_BREAK */
        __raw_writel(0x1, base + SP0_ISDB_ISDB_CMD + offset);
}

static void kgsl_device_snapshot_atomic(struct kgsl_device *device)
{
        struct kgsl_snapshot *snapshot;
        struct timespec64 boot;

        if (device->snapshot && device->force_panic)
                return;

        if (!atomic_read(&device->active_cnt)) {
                dev_err(device->dev, "snapshot: device is powered off\n");
                return;
        }

        device->snapshot_memory_atomic.size = device->snapshot_memory.size;
        if (!device->snapshot_faultcount) {
                /* Use the non-atomic snapshot memory if it is unused */
                device->snapshot_memory_atomic.ptr = device->snapshot_memory.ptr;
        } else {
                /* Limit size to 3MB to avoid failure for atomic snapshot memory */
                if (device->snapshot_memory_atomic.size > (SZ_2M + SZ_1M))
                        device->snapshot_memory_atomic.size = (SZ_2M + SZ_1M);

                device->snapshot_memory_atomic.ptr = devm_kzalloc(&device->pdev->dev,
                        device->snapshot_memory_atomic.size, GFP_ATOMIC);

                /* If we fail to allocate more than 1MB, fall back to 1MB */
                if (WARN_ON((!device->snapshot_memory_atomic.ptr) &&
                        device->snapshot_memory_atomic.size > SZ_1M)) {
                        device->snapshot_memory_atomic.size = SZ_1M;
                        device->snapshot_memory_atomic.ptr = devm_kzalloc(&device->pdev->dev,
                                device->snapshot_memory_atomic.size, GFP_ATOMIC);
                }

                if (!device->snapshot_memory_atomic.ptr) {
                        dev_err(device->dev,
                                "Failed to allocate memory for atomic snapshot\n");
                        return;
                }
        }

        /* Allocate memory for the snapshot instance */
        snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
        if (snapshot == NULL)
                return;

        device->snapshot_atomic = true;
        INIT_LIST_HEAD(&snapshot->obj_list);
        INIT_LIST_HEAD(&snapshot->cp_list);

        snapshot->start = device->snapshot_memory_atomic.ptr;
        snapshot->ptr = device->snapshot_memory_atomic.ptr;
        snapshot->remain = device->snapshot_memory_atomic.size;

        /*
         * Trigger both GPU and GMU snapshot. GPU specific code
         * will take care of whether to dump the full state or only
         * the GMU state based on the current GPU power state.
         */
        if (device->ftbl->snapshot)
                device->ftbl->snapshot(device, snapshot, NULL, NULL);

        kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_EVENTLOG,
                snapshot, kgsl_snapshot_eventlog_buffer, NULL);

        /*
         * The timestamp is the seconds since boot so it is easier to match to
         * the kernel log
         */
        getboottime64(&boot);
        snapshot->timestamp = ktime_get_real_seconds() - boot.tv_sec;

        kgsl_add_to_minidump("ATOMIC_GPU_SNAPSHOT", (u64) device->snapshot_memory_atomic.ptr,
                atomic_snapshot_phy_addr(device), device->snapshot_memory_atomic.size);

        /* Log buffer info to aid in ramdump fault tolerance */
        dev_err(device->dev, "Atomic GPU snapshot created at pa %llx++0x%zx\n",
                atomic_snapshot_phy_addr(device), snapshot->size);
}

/**
 * kgsl_device_snapshot() - construct a device snapshot
 * @device: device to snapshot
 * @context: the context that is hung, might be NULL if unknown.
 * @context_lpac: the lpac context that is hung, might be NULL if unknown.
 * @gmu_fault: whether this snapshot is triggered by a GMU fault.
 *
 * Given a device, construct a binary snapshot dump of the current device state
 * and store it in the device snapshot memory.
 */
void kgsl_device_snapshot(struct kgsl_device *device,
                struct kgsl_context *context, struct kgsl_context *context_lpac,
                bool gmu_fault)
{
        struct kgsl_snapshot *snapshot;
        struct timespec64 boot;

        if (device->ftbl->set_isdb_breakpoint_registers)
                device->ftbl->set_isdb_breakpoint_registers(device);

        if (device->snapshot_memory.ptr == NULL) {
                dev_err(device->dev,
                        "snapshot: no snapshot memory available\n");
                return;
        }

        if (WARN(!kgsl_state_is_awake(device),
                "snapshot: device is powered off\n"))
                return;

        /* Increment the hang count for good bookkeeping */
        device->snapshot_faultcount++;
        device->gmu_fault = gmu_fault;

        if (device->snapshot != NULL) {
                /*
                 * Snapshot over-write policy:
                 * 1. By default, don't over-write the very first snapshot,
                 *    be it a gmu or gpu fault.
                 * 2. Never over-write an existing snapshot on a gpu fault.
                 * 3. Never over-write a snapshot that we didn't recover from.
                 * 4. To over-write a previously recovered snapshot with a new
                 *    gmu fault snapshot, set the sysfs knob
                 *    prioritize_unrecoverable to true.
                 */
                if (!device->prioritize_unrecoverable ||
                        !device->snapshot->recovered || !gmu_fault)
                        return;

                /*
                 * If another thread is currently reading it, that thread
                 * will free it, otherwise free it now.
                 */
                if (!device->snapshot->sysfs_read)
                        kgsl_free_snapshot(device->snapshot);
                device->snapshot = NULL;
        }

        /* Allocate memory for the snapshot instance */
        snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
        if (snapshot == NULL)
                return;

        init_completion(&snapshot->dump_gate);
        INIT_LIST_HEAD(&snapshot->obj_list);
        INIT_LIST_HEAD(&snapshot->cp_list);
        INIT_WORK(&snapshot->work, kgsl_snapshot_save_frozen_objs);

        snapshot->start = device->snapshot_memory.ptr;
        snapshot->ptr = device->snapshot_memory.ptr;
        snapshot->remain = device->snapshot_memory.size;
        snapshot->recovered = false;
        snapshot->first_read = true;
        snapshot->sysfs_read = 0;

        device->ftbl->snapshot(device, snapshot, context, context_lpac);

        kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_EVENTLOG,
                snapshot, kgsl_snapshot_eventlog_buffer, NULL);

        /*
         * The timestamp is the seconds since boot so it is easier to match to
         * the kernel log
         */
        getboottime64(&boot);
        snapshot->timestamp = ktime_get_real_seconds() - boot.tv_sec;

        /* Store the instance in the device until it gets dumped */
        device->snapshot = snapshot;
        snapshot->device = device;

        /* Log buffer info to aid in ramdump fault tolerance */
        dev_err(device->dev, "%s snapshot created at pa %llx++0x%zx\n",
                gmu_fault ? "GMU" : "GPU", snapshot_phy_addr(device),
                snapshot->size);

        kgsl_add_to_minidump("GPU_SNAPSHOT", (u64) device->snapshot_memory.ptr,
                snapshot_phy_addr(device), device->snapshot_memory.size);

        if (device->skip_ib_capture)
                BUG_ON(device->force_panic);

        sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");

        /*
         * Queue a work item that will save the IB data in the snapshot into
         * static memory to prevent loss of data due to overwriting of
         * memory. If force panic is enabled there is no need to defer this,
         * and the IB data can be dumped inline.
         */
        if (device->force_panic)
                kgsl_snapshot_save_frozen_objs(&snapshot->work);
        else
                kgsl_schedule_work(&snapshot->work);
}

/* An attribute for showing snapshot details */
struct kgsl_snapshot_attribute {
        struct attribute attr;
        ssize_t (*show)(struct kgsl_device *device, char *buf);
        ssize_t (*store)(struct kgsl_device *device, const char *buf,
                size_t count);
};

/**
 * kgsl_snapshot_process_ib_obj_list() - Go through the list of IBs which need
 * to be dumped for snapshot and move them to the global snapshot list so
 * they will get dumped when the global list is dumped
 * @snapshot: snapshot instance being processed
 */
static void kgsl_snapshot_process_ib_obj_list(struct kgsl_snapshot *snapshot)
{
        struct kgsl_snapshot_cp_obj *obj, *obj_temp;
        struct adreno_ib_object *ib_obj;
        int i;

        list_for_each_entry_safe(obj, obj_temp, &snapshot->cp_list, node) {
                for (i = 0; i < obj->ib_obj_list->num_objs; i++) {
                        ib_obj = &(obj->ib_obj_list->obj_list[i]);
                        kgsl_snapshot_get_object(snapshot, ib_obj->entry->priv,
                                ib_obj->gpuaddr, ib_obj->size,
                                ib_obj->snapshot_obj_type);
                }

                list_del(&obj->node);
                adreno_ib_destroy_obj_list(obj->ib_obj_list);
                kfree(obj);
        }
}

#define to_snapshot_attr(a) \
        container_of(a, struct kgsl_snapshot_attribute, attr)

#define kobj_to_device(a) \
        container_of(a, struct kgsl_device, snapshot_kobj)

static int snapshot_release(struct kgsl_device *device,
                struct kgsl_snapshot *snapshot)
{
        bool snapshot_free = false;
        int ret = 0;

        mutex_lock(&device->mutex);
        snapshot->sysfs_read--;

        /*
         * If someone's replaced the snapshot, return an error and free
         * the snapshot if this is the last thread to read it.
         */
        if (device->snapshot != snapshot) {
                ret = -EIO;
                if (!snapshot->sysfs_read)
                        snapshot_free = true;
        }

        mutex_unlock(&device->mutex);
        if (snapshot_free)
                kgsl_free_snapshot(snapshot);

        return ret;
}
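
/*
 * sysfs_read counts concurrent readers of the dump file: snapshot_show()
 * takes a reference under device->mutex before copying any data, and the
 * last reader out frees a snapshot that was replaced underneath it.
 */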

/* Dump the sysfs binary data to the user */
static ssize_t snapshot_show(struct file *filep, struct kobject *kobj,
                struct bin_attribute *attr, char *buf, loff_t off,
                size_t count)
{
        struct kgsl_device *device = kobj_to_device(kobj);
        struct kgsl_snapshot *snapshot;
        struct kgsl_snapshot_section_header head;
        struct snapshot_obj_itr itr;
        int ret = 0;

        mutex_lock(&device->mutex);
        snapshot = device->snapshot;

        if (snapshot != NULL) {
                /*
                 * If we're reading at a non-zero offset from a new snapshot,
                 * that means we want to read from the previous snapshot (which
                 * was overwritten), so return an error
                 */
                if (snapshot->first_read) {
                        if (off)
                                ret = -EIO;
                        else
                                snapshot->first_read = false;
                }

                if (!ret)
                        snapshot->sysfs_read++;
        }

        mutex_unlock(&device->mutex);

        if (ret)
                return ret;

        /* Return nothing if we haven't taken a snapshot yet */
        if (snapshot == NULL)
                return 0;

        /*
         * Wait for the dump worker to finish. This is interruptible
         * to allow userspace to bail if things go horribly wrong.
         */
        ret = wait_for_completion_interruptible(&snapshot->dump_gate);
        if (ret) {
                snapshot_release(device, snapshot);
                return ret;
        }

        obj_itr_init(&itr, buf, off, count);

        ret = obj_itr_out(&itr, snapshot->start, snapshot->size);
        if (ret == 0)
                goto done;

        /* Dump the memory pool if it exists */
        if (snapshot->mempool) {
                ret = obj_itr_out(&itr, snapshot->mempool,
                        snapshot->mempool_size);
                if (ret == 0)
                        goto done;
        }

        head.magic = SNAPSHOT_SECTION_MAGIC;
        head.id = KGSL_SNAPSHOT_SECTION_END;
        head.size = sizeof(head);

        obj_itr_out(&itr, &head, sizeof(head));

        /*
         * Make sure everything has been written out before destroying things.
         * The best way to confirm this is to go all the way through without
         * writing any bytes - so only release if we get this far and
         * itr.write is 0 and there are no concurrent reads pending.
         */
        if (itr.write == 0) {
                bool snapshot_free = false;

                mutex_lock(&device->mutex);
                if (--snapshot->sysfs_read == 0) {
                        if (device->snapshot == snapshot)
                                device->snapshot = NULL;
                        snapshot_free = true;
                }
                mutex_unlock(&device->mutex);

                if (snapshot_free)
                        kgsl_free_snapshot(snapshot);
                return 0;
        }

done:
        ret = snapshot_release(device, snapshot);
        return (ret < 0) ? ret : itr.write;
}
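
/*
 * Userspace typically captures the image by reading the binary attribute
 * to EOF, e.g. (path assumes the usual kgsl-3d0 device name):
 *
 *      cat /sys/class/kgsl/kgsl-3d0/snapshot/dump > snapshot.bin
 *
 * Reading all the way to EOF is what lets the itr.write == 0 case above
 * free the snapshot.
 */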

/* Show the total number of hangs since device boot */
static ssize_t faultcount_show(struct kgsl_device *device, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_faultcount);
}

/* Reset the total number of hangs since device boot */
static ssize_t faultcount_store(struct kgsl_device *device, const char *buf,
                size_t count)
{
        if (count)
                device->snapshot_faultcount = 0;

        return count;
}

/* Show the force_panic request status */
static ssize_t force_panic_show(struct kgsl_device *device, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", device->force_panic);
}

/* Store the panic request value to force_panic */
static ssize_t force_panic_store(struct kgsl_device *device, const char *buf,
                size_t count)
{
        if (kstrtobool(buf, &device->force_panic))
                return -EINVAL;

        return count;
}

/* Show the skip_ib_capture request status */
static ssize_t skip_ib_capture_show(struct kgsl_device *device, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", device->skip_ib_capture);
}

/* Store the request value to skip_ib_capture */
static ssize_t skip_ib_capture_store(struct kgsl_device *device,
                const char *buf, size_t count)
{
        int ret;

        ret = kstrtobool(buf, &device->skip_ib_capture);
        return ret ? ret : count;
}

/* Show the prioritize_unrecoverable status */
static ssize_t prioritize_unrecoverable_show(
                struct kgsl_device *device, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n",
                device->prioritize_unrecoverable);
}

/* Store the priority value to prioritize unrecoverable */
static ssize_t prioritize_unrecoverable_store(
                struct kgsl_device *device, const char *buf, size_t count)
{
        if (kstrtobool(buf, &device->prioritize_unrecoverable))
                return -EINVAL;

        return count;
}

/* Show the snapshot_crashdumper request status */
static ssize_t snapshot_crashdumper_show(struct kgsl_device *device, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_crashdumper);
}

/* Store the value to snapshot_crashdumper */
static ssize_t snapshot_crashdumper_store(struct kgsl_device *device,
                const char *buf, size_t count)
{
        if (kstrtobool(buf, &device->snapshot_crashdumper))
                return -EINVAL;

        return count;
}

/* Show the timestamp of the last collected snapshot */
static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
{
        unsigned long timestamp;

        mutex_lock(&device->mutex);
        timestamp = device->snapshot ? device->snapshot->timestamp : 0;
        mutex_unlock(&device->mutex);

        return scnprintf(buf, PAGE_SIZE, "%lu\n", timestamp);
}

static ssize_t snapshot_legacy_show(struct kgsl_device *device, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_legacy);
}

static ssize_t snapshot_legacy_store(struct kgsl_device *device,
                const char *buf, size_t count)
{
        if (kstrtobool(buf, &device->snapshot_legacy))
                return -EINVAL;

        return count;
}

static struct bin_attribute snapshot_attr = {
        .attr.name = "dump",
        .attr.mode = 0444,
        .size = 0,
        .read = snapshot_show,
};

#define SNAPSHOT_ATTR(_name, _mode, _show, _store) \
struct kgsl_snapshot_attribute attr_##_name = { \
        .attr = { .name = __stringify(_name), .mode = _mode }, \
        .show = _show, \
        .store = _store, \
}
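
/*
 * For example, SNAPSHOT_ATTR(force_panic, 0644, force_panic_show,
 * force_panic_store) expands to a kgsl_snapshot_attribute named
 * attr_force_panic whose callbacks are dispatched through
 * snapshot_sysfs_show() and snapshot_sysfs_store() below.
 */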

static SNAPSHOT_ATTR(timestamp, 0444, timestamp_show, NULL);
static SNAPSHOT_ATTR(faultcount, 0644, faultcount_show, faultcount_store);
static SNAPSHOT_ATTR(force_panic, 0644, force_panic_show, force_panic_store);
static SNAPSHOT_ATTR(prioritize_unrecoverable, 0644,
        prioritize_unrecoverable_show, prioritize_unrecoverable_store);
static SNAPSHOT_ATTR(snapshot_crashdumper, 0644, snapshot_crashdumper_show,
        snapshot_crashdumper_store);
static SNAPSHOT_ATTR(snapshot_legacy, 0644, snapshot_legacy_show,
        snapshot_legacy_store);
static SNAPSHOT_ATTR(skip_ib_capture, 0644, skip_ib_capture_show,
        skip_ib_capture_store);

static ssize_t snapshot_sysfs_show(struct kobject *kobj,
                struct attribute *attr, char *buf)
{
        struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
        struct kgsl_device *device = kobj_to_device(kobj);
        ssize_t ret;

        if (device && pattr->show)
                ret = pattr->show(device, buf);
        else
                ret = -EIO;

        return ret;
}

static ssize_t snapshot_sysfs_store(struct kobject *kobj,
                struct attribute *attr, const char *buf, size_t count)
{
        struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
        struct kgsl_device *device = kobj_to_device(kobj);
        ssize_t ret = -EIO;

        if (pattr->store)
                ret = pattr->store(device, buf, count);

        return ret;
}

static const struct sysfs_ops snapshot_sysfs_ops = {
        .show = snapshot_sysfs_show,
        .store = snapshot_sysfs_store,
};

static struct kobj_type ktype_snapshot = {
        .sysfs_ops = &snapshot_sysfs_ops,
};

static const struct attribute *snapshot_attrs[] = {
        &attr_timestamp.attr,
        &attr_faultcount.attr,
        &attr_force_panic.attr,
        &attr_prioritize_unrecoverable.attr,
        &attr_snapshot_crashdumper.attr,
        &attr_snapshot_legacy.attr,
        &attr_skip_ib_capture.attr,
        NULL,
};

static int kgsl_panic_notifier_callback(struct notifier_block *nb,
                unsigned long action, void *unused)
{
        struct kgsl_device *device = container_of(nb, struct kgsl_device,
                panic_nb);

        /* Set gmu_fault so that an NMI is sent to the GMU */
        device->gmu_fault = true;
        kgsl_device_snapshot_atomic(device);

        return NOTIFY_OK;
}

void kgsl_device_snapshot_probe(struct kgsl_device *device, u32 size)
{
        device->snapshot_memory.size = size;
        device->snapshot_memory.ptr = dma_alloc_coherent(&device->pdev->dev,
                device->snapshot_memory.size, &device->snapshot_memory.dma_handle,
                GFP_KERNEL);

        /*
         * If we fail to allocate more than 1MB for snapshot, fall back
         * to 1MB
         */
        if (WARN_ON((!device->snapshot_memory.ptr) && size > SZ_1M)) {
                device->snapshot_memory.size = SZ_1M;
                device->snapshot_memory.ptr = devm_kzalloc(&device->pdev->dev,
                        device->snapshot_memory.size, GFP_KERNEL);
        }

        if (!device->snapshot_memory.ptr) {
                dev_err(device->dev,
                        "KGSL failed to allocate memory for snapshot\n");
                return;
        }

        device->snapshot = NULL;
        device->snapshot_faultcount = 0;
        device->force_panic = false;
        device->snapshot_crashdumper = true;
        device->snapshot_legacy = false;
        device->snapshot_atomic = false;
        device->panic_nb.notifier_call = kgsl_panic_notifier_callback;
        device->panic_nb.priority = 1;
        device->snapshot_ctxt_record_size = 64 * 1024;

        /*
         * Set this to false so that we only ever keep the first snapshot
         * around. If we want to over-write it with a gmu snapshot, set this
         * to true via sysfs.
         */
        device->prioritize_unrecoverable = false;

        if (kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
                &device->dev->kobj, "snapshot"))
                return;

        WARN_ON(sysfs_create_bin_file(&device->snapshot_kobj, &snapshot_attr));
        WARN_ON(sysfs_create_files(&device->snapshot_kobj, snapshot_attrs));
        atomic_notifier_chain_register(&panic_notifier_list,
                &device->panic_nb);
}

/**
 * kgsl_device_snapshot_close() - take down snapshot memory for a device
 * @device: Pointer to the kgsl_device
 *
 * Remove the sysfs files and free the memory allocated for the GPU
 * snapshot
 */
void kgsl_device_snapshot_close(struct kgsl_device *device)
{
        if (device->snapshot_memory.ptr == NULL)
                return;

        kgsl_remove_from_minidump("GPU_SNAPSHOT", (u64) device->snapshot_memory.ptr,
                snapshot_phy_addr(device), device->snapshot_memory.size);

        atomic_notifier_chain_unregister(&panic_notifier_list,
                &device->panic_nb);

        sysfs_remove_bin_file(&device->snapshot_kobj, &snapshot_attr);
        sysfs_remove_files(&device->snapshot_kobj, snapshot_attrs);

        kobject_put(&device->snapshot_kobj);

        if (device->snapshot_memory.dma_handle)
                dma_free_coherent(&device->pdev->dev, device->snapshot_memory.size,
                        device->snapshot_memory.ptr, device->snapshot_memory.dma_handle);
}

/**
 * kgsl_snapshot_add_ib_obj_list() - Add an IB object list to the snapshot
 * object list
 * @snapshot: the snapshot that is being built
 * @ib_obj_list: The IB list that has objects required to execute an IB
 *
 * Adds a new IB to the list of IB objects maintained when getting snapshot.
 * Returns 0 on success else -ENOMEM on error.
 */
int kgsl_snapshot_add_ib_obj_list(struct kgsl_snapshot *snapshot,
                struct adreno_ib_object_list *ib_obj_list)
{
        struct kgsl_snapshot_cp_obj *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        obj->ib_obj_list = ib_obj_list;
        list_add(&obj->node, &snapshot->cp_list);

        return 0;
}

static size_t _mempool_add_object(struct kgsl_snapshot *snapshot, u8 *data,
                struct kgsl_snapshot_object *obj)
{
        struct kgsl_snapshot_section_header *section =
                (struct kgsl_snapshot_section_header *)data;
        struct kgsl_snapshot_gpu_object_v2 *header =
                (struct kgsl_snapshot_gpu_object_v2 *)(data + sizeof(*section));
        u8 *dest = data + sizeof(*section) + sizeof(*header);
        uint64_t size;

        size = obj->size;

        if (!kgsl_memdesc_map(&obj->entry->memdesc)) {
                dev_err(snapshot->device->dev,
                        "snapshot: failed to map GPU object\n");
                return 0;
        }

        section->magic = SNAPSHOT_SECTION_MAGIC;
        section->id = KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2;
        section->size = size + sizeof(*header) + sizeof(*section);

        header->size = size >> 2;
        header->gpuaddr = obj->gpuaddr;
        header->ptbase =
                kgsl_mmu_pagetable_get_ttbr0(obj->entry->priv->pagetable);
        header->type = obj->type;

        if (kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
                snapshot->ib1base, snapshot->ib1size))
                snapshot->ib1dumped = true;

        if (kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
                snapshot->ib2base, snapshot->ib2size))
                snapshot->ib2dumped = true;

        memcpy(dest, obj->entry->memdesc.hostptr + obj->offset, size);
        kgsl_memdesc_unmap(&obj->entry->memdesc);

        return section->size;
}
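
/*
 * Each saved object becomes a self-contained GPU_OBJECT_V2 section in the
 * mempool: a section header, then a kgsl_snapshot_gpu_object_v2 header
 * (whose size field is in dwords, hence the >> 2), then the raw object
 * contents copied from the kernel mapping.
 */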

/**
 * kgsl_snapshot_save_frozen_objs() - Save the objects frozen in snapshot into
 * memory so that the data reported in these objects is correct when snapshot
 * is taken
 * @work: The work item that scheduled this work
 */
static void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
{
        struct kgsl_snapshot *snapshot = container_of(work,
                struct kgsl_snapshot, work);
        struct kgsl_snapshot_object *obj, *tmp;
        size_t size = 0;
        void *ptr;

        if (snapshot->device->gmu_fault)
                goto gmu_only;

        kgsl_snapshot_process_ib_obj_list(snapshot);

        list_for_each_entry(obj, &snapshot->obj_list, node) {
                obj->size = ALIGN(obj->size, 4);

                size += ((size_t) obj->size +
                        sizeof(struct kgsl_snapshot_gpu_object_v2) +
                        sizeof(struct kgsl_snapshot_section_header));
        }

        if (size == 0)
                goto done;

        snapshot->mempool = vmalloc(size);

        ptr = snapshot->mempool;
        snapshot->mempool_size = 0;

        /* Even if vmalloc fails, make sure we clean up the obj_list */
        list_for_each_entry_safe(obj, tmp, &snapshot->obj_list, node) {
                if (snapshot->mempool) {
                        size_t ret = _mempool_add_object(snapshot, ptr, obj);

                        ptr += ret;
                        snapshot->mempool_size += ret;
                }

                kgsl_snapshot_put_object(obj);
        }

done:
        /*
         * Get rid of the process struct here, so that it doesn't sit
         * around until someone bothers to read the snapshot file.
         */
        kgsl_process_private_put(snapshot->process);
        snapshot->process = NULL;

        if (snapshot->ib1base && !snapshot->ib1dumped)
                dev_err(snapshot->device->dev,
                        "snapshot: Active IB1:%016llx not dumped\n",
                        snapshot->ib1base);
        else if (snapshot->ib2base && !snapshot->ib2dumped)
                dev_err(snapshot->device->dev,
                        "snapshot: Active IB2:%016llx not dumped\n",
                        snapshot->ib2base);

gmu_only:
        BUG_ON(!snapshot->device->skip_ib_capture &&
                snapshot->device->force_panic);
        complete_all(&snapshot->dump_gate);
}