minidump_memory.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/align.h>
  7. #include <linux/mm.h>
  8. #include <linux/swap.h>
  9. #include <linux/mman.h>
  10. #include <linux/seq_buf.h>
  11. #include <linux/vmalloc.h>
  12. #include <linux/cma.h>
  13. #include <linux/slab.h>
  14. #include <linux/page_ext.h>
  15. #include <linux/page_owner.h>
  16. #include <linux/debugfs.h>
  17. #include <linux/ctype.h>
  18. #include <soc/qcom/minidump.h>
  19. #include <linux/dma-map-ops.h>
  20. #include <linux/jhash.h>
  21. #include <linux/dma-buf.h>
  22. #include <linux/dma-resv.h>
  23. #include <linux/fdtable.h>
  24. #include <linux/qcom_dma_heap.h>
  25. #include "debug_symbol.h"
  26. #include <linux/bitmap.h>
  27. #include "minidump_memory.h"
  28. #include "../../../mm/slab.h"
  29. #include "../mm/internal.h"
  30. static unsigned long *md_debug_totalcma_pages;
  31. static struct list_head *md_debug_slab_caches;
  32. static struct mutex *md_debug_slab_mutex;
  33. static struct static_key *md_debug_page_owner_inited;
  34. static struct static_key *md_debug_slub_debug_enabled;
  35. static unsigned long *md_debug_min_low_pfn;
  36. static unsigned long *md_debug_max_pfn;
  37. #define DMA_BUF_HASH_SIZE (1 << 20)
  38. #define DMA_BUF_HASH_SEED 0x9747b28c
  39. static DECLARE_BITMAP(dma_buf_hash, DMA_BUF_HASH_SIZE);
  40. struct priv_buf {
  41. char *buf;
  42. size_t size;
  43. size_t offset;
  44. };
  45. struct dma_buf_priv {
  46. struct priv_buf *priv_buf;
  47. struct task_struct *task;
  48. int count;
  49. size_t size;
  50. };
  51. static void show_val_kb(struct seq_buf *m, const char *s, unsigned long num)
  52. {
  53. seq_buf_printf(m, "%s : %lu KB\n", s, num << (PAGE_SHIFT - 10));
  54. }
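/* Render a /proc/meminfo-style summary into the minidump seq_buf. */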
  55. void md_dump_meminfo(struct seq_buf *m)
  56. {
  57. struct sysinfo i;
  58. long cached;
  59. long available;
  60. unsigned long pages[NR_LRU_LISTS];
  61. unsigned long sreclaimable, sunreclaim;
  62. int lru;
  63. si_meminfo(&i);
  64. si_swapinfo(&i);
  65. cached = global_node_page_state(NR_FILE_PAGES) -
  66. total_swapcache_pages() - i.bufferram;
  67. if (cached < 0)
  68. cached = 0;
  69. for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
  70. pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
  71. available = si_mem_available();
  72. sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B);
  73. sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B);
  74. show_val_kb(m, "MemTotal: ", i.totalram);
  75. show_val_kb(m, "MemFree: ", i.freeram);
  76. show_val_kb(m, "MemAvailable: ", available);
  77. show_val_kb(m, "Buffers: ", i.bufferram);
  78. show_val_kb(m, "Cached: ", cached);
  79. show_val_kb(m, "SwapCached: ", total_swapcache_pages());
  80. show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] +
  81. pages[LRU_ACTIVE_FILE]);
  82. show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] +
  83. pages[LRU_INACTIVE_FILE]);
  84. show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]);
  85. show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]);
  86. show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]);
  87. show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]);
  88. show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]);
  89. show_val_kb(m, "Mlocked: ", global_zone_page_state(NR_MLOCK));
  90. #ifdef CONFIG_HIGHMEM
  91. show_val_kb(m, "HighTotal: ", i.totalhigh);
  92. show_val_kb(m, "HighFree: ", i.freehigh);
  93. show_val_kb(m, "LowTotal: ", i.totalram - i.totalhigh);
  94. show_val_kb(m, "LowFree: ", i.freeram - i.freehigh);
  95. #endif
  96. show_val_kb(m, "SwapTotal: ", i.totalswap);
  97. show_val_kb(m, "SwapFree: ", i.freeswap);
  98. show_val_kb(m, "Dirty: ",
  99. global_node_page_state(NR_FILE_DIRTY));
  100. show_val_kb(m, "Writeback: ",
  101. global_node_page_state(NR_WRITEBACK));
  102. show_val_kb(m, "AnonPages: ",
  103. global_node_page_state(NR_ANON_MAPPED));
  104. show_val_kb(m, "Mapped: ",
  105. global_node_page_state(NR_FILE_MAPPED));
  106. show_val_kb(m, "Shmem: ", i.sharedram);
  107. show_val_kb(m, "KReclaimable: ", sreclaimable +
  108. global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE));
  109. show_val_kb(m, "Slab: ", sreclaimable + sunreclaim);
  110. show_val_kb(m, "SReclaimable: ", sreclaimable);
  111. show_val_kb(m, "SUnreclaim: ", sunreclaim);
  112. seq_buf_printf(m, "KernelStack: %8lu kB\n",
  113. global_node_page_state(NR_KERNEL_STACK_KB));
  114. #ifdef CONFIG_SHADOW_CALL_STACK
  115. seq_buf_printf(m, "ShadowCallStack:%8lu kB\n",
  116. global_node_page_state(NR_KERNEL_SCS_KB));
  117. #endif
  118. show_val_kb(m, "PageTables: ",
  119. global_node_page_state(NR_PAGETABLE));
  120. show_val_kb(m, "Bounce: ",
  121. global_zone_page_state(NR_BOUNCE));
  122. show_val_kb(m, "WritebackTmp: ",
  123. global_node_page_state(NR_WRITEBACK_TEMP));
  124. seq_buf_printf(m, "VmallocTotal: %8lu kB\n",
  125. (unsigned long)VMALLOC_TOTAL >> 10);
  126. show_val_kb(m, "VmallocUsed: ", vmalloc_nr_pages());
  127. show_val_kb(m, "Percpu: ", pcpu_nr_pages());
  128. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  129. show_val_kb(m, "AnonHugePages: ",
  130. global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR);
  131. show_val_kb(m, "ShmemHugePages: ",
  132. global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
  133. show_val_kb(m, "ShmemPmdMapped: ",
  134. global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
  135. show_val_kb(m, "FileHugePages: ",
  136. global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
  137. show_val_kb(m, "FilePmdMapped: ",
  138. global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
  139. #endif
  140. #ifdef CONFIG_CMA
  141. show_val_kb(m, "CmaTotal: ", *md_debug_totalcma_pages);
  142. show_val_kb(m, "CmaFree: ",
  143. global_zone_page_state(NR_FREE_CMA_PAGES));
  144. #endif
  145. }
  146. #ifdef CONFIG_SLUB_DEBUG
  147. static void slabinfo_stats(struct seq_buf *m, struct kmem_cache *cachep)
  148. {
  149. #ifdef CONFIG_DEBUG_SLAB
  150. { /* node stats */
  151. unsigned long high = cachep->high_mark;
  152. unsigned long allocs = cachep->num_allocations;
  153. unsigned long grown = cachep->grown;
  154. unsigned long reaped = cachep->reaped;
  155. unsigned long errors = cachep->errors;
  156. unsigned long max_freeable = cachep->max_freeable;
  157. unsigned long node_allocs = cachep->node_allocs;
  158. unsigned long node_frees = cachep->node_frees;
  159. unsigned long overflows = cachep->node_overflow;
  160. seq_buf_printf(m,
  161. " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
  162. allocs, high, grown,
  163. reaped, errors, max_freeable,
  164. node_allocs, node_frees, overflows);
  165. }
  166. /* cpu stats */
  167. {
  168. unsigned long allochit = atomic_read(&cachep->allochit);
  169. unsigned long allocmiss = atomic_read(&cachep->allocmiss);
  170. unsigned long freehit = atomic_read(&cachep->freehit);
  171. unsigned long freemiss = atomic_read(&cachep->freemiss);
  172. seq_buf_printf(m,
  173. " : cpustat %6lu %6lu %6lu %6lu",
  174. allochit, allocmiss, freehit, freemiss);
  175. }
  176. #endif
  177. }
  178. void md_dump_slabinfo(struct seq_buf *m)
  179. {
  180. struct kmem_cache *s;
  181. struct slabinfo sinfo;
  182. if (!md_debug_slab_caches)
  183. return;
  184. if (!md_debug_slab_mutex)
  185. return;
  186. if (!mutex_trylock(md_debug_slab_mutex))
  187. return;
  188. /* print_slabinfo_header */
  189. seq_buf_printf(m,
  190. "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
  191. seq_buf_printf(m,
  192. " : tunables <limit> <batchcount> <sharedfactor>");
  193. seq_buf_printf(m,
  194. " : slabdata <active_slabs> <num_slabs> <sharedavail>");
  195. #ifdef CONFIG_DEBUG_SLAB
  196. seq_buf_printf(m,
  197. " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
  198. seq_buf_printf(m,
  199. " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
  200. #endif
  201. seq_buf_printf(m, "\n");
  202. /* Loop through all slabs */
  203. list_for_each_entry(s, md_debug_slab_caches, list) {
  204. memset(&sinfo, 0, sizeof(sinfo));
  205. get_slabinfo(s, &sinfo);
  206. seq_buf_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
  207. s->name, sinfo.active_objs, sinfo.num_objs, s->size,
  208. sinfo.objects_per_slab, (1 << sinfo.cache_order));
  209. seq_buf_printf(m, " : tunables %4u %4u %4u",
  210. sinfo.limit, sinfo.batchcount, sinfo.shared);
  211. seq_buf_printf(m, " : slabdata %6lu %6lu %6lu",
  212. sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
  213. slabinfo_stats(m, s);
  214. seq_buf_printf(m, "\n");
  215. }
  216. mutex_unlock(md_debug_slab_mutex);
  217. }
  218. #endif
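/*
 * Back a minidump region with CMA memory and register it with the minidump
 * driver; on success, publish the buffer address for the matching dump type.
 */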
  219. bool md_register_memory_dump(int size, char *name)
  220. {
  221. struct md_region md_entry;
  222. void *buffer_start;
  223. struct page *page;
  224. int ret;
  225. page = cma_alloc(dma_contiguous_default_area, size >> PAGE_SHIFT,
  226. 0, GFP_KERNEL);
  227. if (!page) {
  228. pr_err("Failed to allocate %s minidump, increase cma size\n",
  229. name);
  230. return false;
  231. }
  232. buffer_start = page_to_virt(page);
  233. strscpy(md_entry.name, name, sizeof(md_entry.name));
  234. md_entry.virt_addr = (uintptr_t) buffer_start;
  235. md_entry.phys_addr = virt_to_phys(buffer_start);
  236. md_entry.size = size;
  237. ret = msm_minidump_add_region(&md_entry);
  238. if (ret < 0) {
  239. cma_release(dma_contiguous_default_area, page, size >> PAGE_SHIFT);
  240. pr_err("Failed to add %s entry in Minidump\n", name);
  241. return false;
  242. }
  243. memset(buffer_start, 0, size);
  244. /* Complete registration before adding entries */
  245. smp_mb();
  246. #ifdef CONFIG_PAGE_OWNER
  247. if (!strcmp(name, "PAGEOWNER"))
  248. WRITE_ONCE(md_pageowner_dump_addr, buffer_start);
  249. #endif
  250. #ifdef CONFIG_SLUB_DEBUG
  251. if (!strcmp(name, "SLABOWNER"))
  252. WRITE_ONCE(md_slabowner_dump_addr, buffer_start);
  253. #endif
  254. if (!strcmp(name, "DMA_INFO"))
  255. WRITE_ONCE(md_dma_buf_info_addr, buffer_start);
  256. if (!strcmp(name, "DMA_PROC"))
  257. WRITE_ONCE(md_dma_buf_procs_addr, buffer_start);
  258. return true;
  259. }
  260. bool md_unregister_memory_dump(char *name)
  261. {
  262. struct page *page;
  263. struct md_region mdr;
  264. struct md_region md_entry;
  265. mdr = md_get_region(name);
  266. if (!mdr.virt_addr) {
  267. pr_err("minidump entry for %s not found\n", name);
  268. return false;
  269. }
  270. strscpy(md_entry.name, mdr.name, sizeof(md_entry.name));
  271. md_entry.virt_addr = mdr.virt_addr;
  272. md_entry.phys_addr = mdr.phys_addr;
  273. md_entry.size = mdr.size;
  274. page = virt_to_page(mdr.virt_addr);
  275. if (msm_minidump_remove_region(&md_entry) < 0)
  276. return false;
  277. cma_release(dma_contiguous_default_area, page,
  278. (md_entry.size) >> PAGE_SHIFT);
  279. return true;
  280. }
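/*
 * Resize (or disable, when size is 0) a registered dump region. The existing
 * region is unregistered first; if re-registration at the new size fails, the
 * previous size is restored as a fallback.
 */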
  281. static void update_dump_size(char *name, size_t size, char **addr, size_t *dump_size)
  282. {
  283. if ((*dump_size) == 0) {
  284. if (md_register_memory_dump(size * SZ_1M,
  285. name)) {
  286. *dump_size = size * SZ_1M;
  287. pr_info_ratelimited("%s Minidump : set to %zd MB\n",
  288. name, size);
  289. }
  290. return;
  291. }
  292. if (md_unregister_memory_dump(name)) {
  293. *addr = NULL;
  294. if (size == 0) {
  295. *dump_size = 0;
  296. pr_info_ratelimited("%s Minidump : disabled\n", name);
  297. return;
  298. }
  299. if (md_register_memory_dump(size * SZ_1M,
  300. name)) {
  301. *dump_size = size * SZ_1M;
  302. pr_info_ratelimited("%s Minidump : set to %zd MB\n",
  303. name, size);
  304. } else if (md_register_memory_dump(*dump_size,
  305. name)) {
  306. pr_info_ratelimited("%s Minidump : falling back to %zd MB\n",
  307. name, (*dump_size) / SZ_1M);
  308. } else {
  309. pr_err_ratelimited("%s Minidump : disabled, cannot fall back to %zd MB\n",
  310. name, (*dump_size) / SZ_1M);
  311. *dump_size = 0;
  312. }
  313. } else {
  314. pr_err_ratelimited("Failed to unregister %s Minidump\n", name);
  315. }
  316. }
  317. #ifdef CONFIG_PAGE_OWNER
  318. static unsigned long page_owner_filter = 0xF;
  319. static unsigned long page_owner_handles_size = SZ_16K;
  320. static int nr_page_owner_handles, nr_slab_owner_handles;
  321. static LIST_HEAD(accounted_call_site_list);
  322. static DEFINE_SPINLOCK(accounted_call_site_lock);
  323. struct accounted_call_site {
  324. struct list_head list;
  325. char name[50];
  326. };
  327. bool is_page_owner_enabled(void)
  328. {
  329. if (md_debug_page_owner_inited &&
  330. atomic_read(&md_debug_page_owner_inited->enabled))
  331. return true;
  332. return false;
  333. }
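/*
 * Track stack depot handles that have already been emitted. Handles are kept
 * in a table carved out of the tail of the dump region; returns true if the
 * handle was seen before, so callers can skip re-printing the full stack.
 */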
  334. static bool found_stack(depot_stack_handle_t handle,
  335. char *dump_addr, size_t dump_size,
  336. unsigned long handles_size, int *nr_handles)
  337. {
  338. int *handles, i;
  339. handles = (int *) (dump_addr +
  340. dump_size - handles_size);
  341. for (i = 0; i < *nr_handles; i++)
  342. if (handle == handles[i])
  343. return true;
  344. if ((handles + *nr_handles) < (int *)(dump_addr + dump_size)) {
  345. handles[*nr_handles] = handle;
  346. *nr_handles += 1;
  347. } else {
  348. pr_err_ratelimited("Cannot store handle, increase handles size\n");
  349. }
  350. return false;
  351. }
  352. static bool check_unaccounted(char *buf, ssize_t count,
  353. struct page *page, depot_stack_handle_t handle)
  354. {
  355. int i, ret = 0;
  356. unsigned long *entries, flags;
  357. unsigned int nr_entries;
  358. struct accounted_call_site *call_site;
  359. if ((page->flags &
  360. ((1UL << PG_lru) | (1UL << PG_slab) | (1UL << PG_swapbacked))))
  361. return false;
  362. nr_entries = stack_depot_fetch(handle, &entries);
  363. for (i = 0; i < nr_entries; i++) {
  364. ret = scnprintf(buf, count, "%pS\n",
  365. (void *)entries[i]);
  366. if (ret == count - 1)
  367. return false;
  368. spin_lock_irqsave(&accounted_call_site_lock, flags);
  369. list_for_each_entry(call_site,
  370. &accounted_call_site_list, list) {
  371. if (strnstr(buf, call_site->name,
  372. strlen(buf))) {
  373. spin_unlock_irqrestore(&accounted_call_site_lock, flags);
  374. return false;
  375. }
  376. }
  377. spin_unlock_irqrestore(&accounted_call_site_lock, flags);
  378. }
  379. return true;
  380. }
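/*
 * Emit one page_owner record, honouring page_owner_filter bits:
 * 0x1 unaccounted, 0x2 slab, 0x4 anon (swapbacked), 0x8 file-backed LRU.
 */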
  381. static ssize_t dump_page_owner_md(char *buf, size_t count,
  382. unsigned long pfn, struct page *page,
  383. depot_stack_handle_t handle)
  384. {
  385. int i, bit, ret = 0;
  386. unsigned long *entries;
  387. unsigned int nr_entries;
  388. if (page_owner_filter == 0xF)
  389. goto dump;
  390. for (bit = 1; page_owner_filter >= bit; bit *= 2) {
  391. if (page_owner_filter & bit) {
  392. switch (bit) {
  393. case 0x1:
  394. if (check_unaccounted(buf, count, page, handle))
  395. goto dump;
  396. break;
  397. case 0x2:
  398. if (page->flags & (1UL << PG_slab))
  399. goto dump;
  400. break;
  401. case 0x4:
  402. if (page->flags & (1UL << PG_swapbacked))
  403. goto dump;
  404. break;
  405. case 0x8:
  406. if ((page->flags & (1UL << PG_lru)) &&
  407. !(page->flags & (1UL << PG_swapbacked)))
  408. goto dump;
  409. break;
  410. default:
  411. break;
  412. }
  413. }
  414. if (bit >= 0x8)
  415. return ret;
  416. }
  417. if (bit > page_owner_filter)
  418. return ret;
  419. dump:
  420. nr_entries = stack_depot_fetch(handle, &entries);
  421. if ((buf > (md_pageowner_dump_addr +
  422. md_pageowner_dump_size - page_owner_handles_size))
  423. || !found_stack(handle,
  424. md_pageowner_dump_addr,
  425. md_pageowner_dump_size,
  426. page_owner_handles_size,
  427. &nr_page_owner_handles)) {
  428. ret = scnprintf(buf, count, "%lu %u %u\n",
  429. pfn, handle, nr_entries);
  430. if (ret == count - 1)
  431. goto err;
  432. for (i = 0; i < nr_entries; i++) {
  433. ret += scnprintf(buf + ret, count - ret,
  434. "%p\n", (void *)entries[i]);
  435. if (ret == count - 1)
  436. goto err;
  437. }
  438. } else {
  439. ret = scnprintf(buf, count, "%lu %u %u\n", pfn, handle, 0);
  440. }
  441. err:
  442. return ret;
  443. }
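/* Walk all valid PFNs and dump page_owner data for allocated pages. */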
  444. void md_dump_pageowner(char *addr, size_t dump_size)
  445. {
  446. unsigned long pfn;
  447. struct page *page;
  448. struct page_ext *page_ext;
  449. depot_stack_handle_t handle;
  450. ssize_t size;
  451. if (!md_debug_min_low_pfn)
  452. return;
  453. if (!md_debug_max_pfn)
  454. return;
  455. page = NULL;
  456. pfn = *md_debug_min_low_pfn;
  457. /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
  458. while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
  459. pfn++;
  460. /* Find an allocated page */
  461. for (; pfn < *md_debug_max_pfn; pfn++) {
  462. /*
  463. * If the new page is in a new MAX_ORDER_NR_PAGES area,
  464. * validate the area as existing, skip it if not
  465. */
  466. if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
  467. pfn += MAX_ORDER_NR_PAGES - 1;
  468. continue;
  469. }
  470. page = pfn_to_page(pfn);
  471. if (PageBuddy(page)) {
  472. unsigned long freepage_order = buddy_order_unsafe(page);
  473. if (freepage_order < MAX_ORDER)
  474. pfn += (1UL << freepage_order) - 1;
  475. continue;
  476. }
  477. page_ext = page_ext_get(page);
  478. if (unlikely(!page_ext))
  479. goto next;
  480. /*
  481. * Some pages could be missed by concurrent allocation or free,
  482. * because we don't hold the zone lock.
  483. */
  484. if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
  485. goto next;
  486. /*
  487. * Although we do have the info about past allocation of free
  488. * pages, it's not relevant for current memory usage.
  489. */
  490. if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
  491. goto next;
  492. handle = get_page_owner_handle(page_ext, pfn);
  493. if (!handle)
  494. goto next;
  495. size = dump_page_owner_md(addr, dump_size, pfn, page, handle);
  496. if (size == dump_size - 1) {
  497. pr_err("pageowner minidump region exhausted\n");
  498. page_ext_put(page_ext);
  499. return;
  500. }
  501. dump_size -= size;
  502. addr += size;
  503. next:
  504. page_ext_put(page_ext);
  505. }
  506. }
  507. static DEFINE_MUTEX(page_owner_dump_size_lock);
  508. static ssize_t page_owner_dump_size_write(struct file *file,
  509. const char __user *ubuf,
  510. size_t count, loff_t *offset)
  511. {
  512. unsigned long long size;
  513. if (kstrtoull_from_user(ubuf, count, 0, &size)) {
  514. pr_err_ratelimited("Invalid format for size\n");
  515. return -EINVAL;
  516. }
  517. mutex_lock(&page_owner_dump_size_lock);
  518. update_dump_size("PAGEOWNER", size,
  519. &md_pageowner_dump_addr, &md_pageowner_dump_size);
  520. mutex_unlock(&page_owner_dump_size_lock);
  521. return count;
  522. }
  523. static ssize_t page_owner_dump_size_read(struct file *file, char __user *ubuf,
  524. size_t count, loff_t *offset)
  525. {
  526. char buf[100];
  527. snprintf(buf, sizeof(buf), "%llu MB\n",
  528. md_pageowner_dump_size / SZ_1M);
  529. return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
  530. }
  531. static const struct file_operations proc_page_owner_dump_size_ops = {
  532. .open = simple_open,
  533. .write = page_owner_dump_size_write,
  534. .read = page_owner_dump_size_read,
  535. };
  536. static ssize_t page_owner_filter_write(struct file *file,
  537. const char __user *ubuf,
  538. size_t count, loff_t *offset)
  539. {
  540. unsigned long filter;
  541. if (kstrtoul_from_user(ubuf, count, 0, &filter)) {
  542. pr_err_ratelimited("Invalid format for filter\n");
  543. return -EINVAL;
  544. }
  545. if (filter & (~0xF)) {
  546. pr_err_ratelimited("Invalid filter : use following filters or any combinations of these\n"
  547. "0x1 - unaccounted\n"
  548. "0x2 - slab\n"
  549. "0x4 - Anon\n"
  550. "0x8 - File\n");
  551. return -EINVAL;
  552. }
  553. page_owner_filter = filter;
  554. return count;
  555. }
  556. static ssize_t page_owner_filter_read(struct file *file, char __user *ubuf,
  557. size_t count, loff_t *offset)
  558. {
  559. char buf[64];
  560. snprintf(buf, sizeof(buf), "0x%lx\n", page_owner_filter);
  561. return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
  562. }
  563. static const struct file_operations proc_page_owner_filter_ops = {
  564. .open = simple_open,
  565. .write = page_owner_filter_write,
  566. .read = page_owner_filter_read,
  567. };
  568. static ssize_t page_owner_handle_write(struct file *file,
  569. const char __user *ubuf,
  570. size_t count, loff_t *offset)
  571. {
  572. unsigned long size;
  573. if (kstrtoul_from_user(ubuf, count, 0, &size)) {
  574. pr_err_ratelimited("Invalid format for handle size\n");
  575. return -EINVAL;
  576. }
  577. if (size) {
  578. if (size > (md_pageowner_dump_size / SZ_16K)) {
  579. pr_err_ratelimited("size : %lu KB exceeds max size : %lu KB\n",
  580. size, (md_pageowner_dump_size / SZ_16K));
  581. goto err;
  582. }
  583. page_owner_handles_size = size * SZ_1K;
  584. }
  585. err:
  586. return count;
  587. }
  588. static ssize_t page_owner_handle_read(struct file *file, char __user *ubuf,
  589. size_t count, loff_t *offset)
  590. {
  591. char buf[64];
  592. snprintf(buf, sizeof(buf), "%lu KB\n",
  593. (page_owner_handles_size / SZ_1K));
  594. return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
  595. }
  596. static const struct file_operations proc_page_owner_handle_ops = {
  597. .open = simple_open,
  598. .write = page_owner_handle_write,
  599. .read = page_owner_handle_read,
  600. };
  601. static ssize_t page_owner_call_site_write(struct file *file,
  602. const char __user *ubuf,
  603. size_t count, loff_t *offset)
  604. {
  605. struct accounted_call_site *call_site;
  606. char buf[50];
  607. unsigned long flags;
  608. if (count >= 50) {
  609. pr_err_ratelimited("Input string size too large\n");
  610. return -EINVAL;
  611. }
  612. memset(buf, 0, 50);
  613. if (copy_from_user(buf, ubuf, count)) {
  614. pr_err_ratelimited("Couldn't copy from user\n");
  615. return -EFAULT;
  616. }
  617. if (!isalpha(buf[0]) && buf[0] != '_') {
  618. pr_err_ratelimited("Invalid call site name\n");
  619. return -EINVAL;
  620. }
  621. call_site = kzalloc(sizeof(*call_site), GFP_KERNEL);
  622. if (!call_site)
  623. return -ENOMEM;
  624. strscpy(call_site->name, buf, sizeof(call_site->name));
  625. spin_lock_irqsave(&accounted_call_site_lock, flags);
  626. list_add_tail(&call_site->list, &accounted_call_site_list);
  627. spin_unlock_irqrestore(&accounted_call_site_lock, flags);
  628. return count;
  629. }
  630. static ssize_t page_owner_call_site_read(struct file *file, char __user *ubuf,
  631. size_t count, loff_t *offset)
  632. {
  633. char *kbuf;
  634. struct accounted_call_site *call_site;
  635. unsigned long flags;
  636. int i = 1, ret = 0;
  637. size_t size = PAGE_SIZE;
  638. kbuf = kmalloc(size, GFP_KERNEL);
  639. if (!kbuf)
  640. return -ENOMEM;
  641. ret = scnprintf(kbuf, size, "%s\n", "Accounted call sites:");
  642. spin_lock_irqsave(&accounted_call_site_lock, flags);
  643. list_for_each_entry(call_site, &accounted_call_site_list, list) {
  644. ret += scnprintf(kbuf + ret, size - ret,
  645. "%d. %s\n", i, call_site->name);
  646. i += 1;
  647. if (ret == size) {
  648. ret = -ENOMEM;
  649. spin_unlock_irqrestore(&accounted_call_site_lock, flags);
  650. goto err;
  651. }
  652. }
  653. spin_unlock_irqrestore(&accounted_call_site_lock, flags);
  654. ret = simple_read_from_buffer(ubuf, count, offset, kbuf, strlen(kbuf));
  655. err:
  656. kfree(kbuf);
  657. return ret;
  658. }
  659. static const struct file_operations proc_page_owner_call_site_ops = {
  660. .open = simple_open,
  661. .write = page_owner_call_site_write,
  662. .read = page_owner_call_site_read,
  663. };
  664. void md_debugfs_pageowner(struct dentry *minidump_dir)
  665. {
  666. debugfs_create_file("page_owner_dump_size_mb", 0400, minidump_dir, NULL,
  667. &proc_page_owner_dump_size_ops);
  668. debugfs_create_file("page_owner_filter", 0400, minidump_dir, NULL,
  669. &proc_page_owner_filter_ops);
  670. debugfs_create_file("page_owner_handles_size_kb", 0400, minidump_dir, NULL,
  671. &proc_page_owner_handle_ops);
  672. debugfs_create_file("page_owner_call_sites", 0400, minidump_dir, NULL,
  673. &proc_page_owner_call_site_ops);
  674. }
  675. #endif
  676. #ifdef CONFIG_SLUB_DEBUG
  677. #define STACK_HASH_SEED 0x9747b28c
  678. static unsigned long slab_owner_filter;
  679. static unsigned long slab_owner_handles_size = SZ_16K;
  680. bool is_slub_debug_enabled(void)
  681. {
  682. if (md_debug_slub_debug_enabled &&
  683. atomic_read(&md_debug_slub_debug_enabled->enabled))
  684. return true;
  685. return false;
  686. }
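/*
 * Callback for get_each_object_track(): append one allocation record (object,
 * stack depot handle and stack entries) to the slab owner dump buffer.
 */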
  687. static int dump_tracking(const struct kmem_cache *s,
  688. const void *object,
  689. const struct track *t, void *private)
  690. {
  691. int ret = 0;
  692. u32 nr_entries;
  693. struct priv_buf *priv_buf;
  694. char *buf;
  695. size_t size;
  696. unsigned long *entries;
  697. if (!t->addr || !t->handle)
  698. return 0;
  699. priv_buf = (struct priv_buf *)private;
  700. buf = priv_buf->buf + priv_buf->offset;
  701. size = priv_buf->size - priv_buf->offset;
  702. #ifdef CONFIG_STACKDEPOT
  703. {
  704. int i;
  705. nr_entries = stack_depot_fetch(t->handle, &entries);
  706. if ((buf > (md_slabowner_dump_addr +
  707. md_slabowner_dump_size - slab_owner_handles_size))
  708. || !found_stack(t->handle,
  709. md_slabowner_dump_addr,
  710. md_slabowner_dump_size,
  711. slab_owner_handles_size,
  712. &nr_slab_owner_handles)) {
  713. ret = scnprintf(buf, size, "%p %p %u\n",
  714. object, (void *)(unsigned long)t->handle, nr_entries);
  715. if (ret == size - 1)
  716. goto err;
  717. for (i = 0; i < nr_entries; i++) {
  718. ret += scnprintf(buf + ret, size - ret,
  719. "%p\n", (void *)entries[i]);
  720. if (ret == size - 1)
  721. goto err;
  722. }
  723. } else {
  724. ret = scnprintf(buf, size, "%p %p %u\n",
  725. object, (void *)(unsigned long)t->handle, 0);
  726. }
  727. }
  728. #else
  729. ret = scnprintf(buf, size, "%p %p\n", object, (void *)t->addr);
  730. #endif
  731. err:
  732. priv_buf->offset += ret;
  733. return ret;
  734. }
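/* Dump allocation tracking for the kmalloc caches selected by slab_owner_filter. */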
  735. void md_dump_slabowner(char *m, size_t dump_size)
  736. {
  737. struct kmem_cache *s;
  738. int node;
  739. struct priv_buf buf;
  740. struct kmem_cache_node *n;
  741. ssize_t ret;
  742. int i;
  743. buf.buf = m;
  744. buf.size = dump_size;
  745. buf.offset = 0;
  746. for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
  747. if (!test_bit(i, &slab_owner_filter))
  748. continue;
  749. s = kmalloc_caches[KMALLOC_NORMAL][i];
  750. if (!s)
  751. continue;
  752. ret = scnprintf(buf.buf, buf.size, "%s\n", s->name);
  753. if (ret == buf.size - 1)
  754. return;
  755. buf.buf += ret;
  756. for_each_kmem_cache_node(s, node, n) {
  757. unsigned long flags;
  758. struct page *page;
  759. if (!atomic_long_read(&n->nr_slabs))
  760. continue;
  761. spin_lock_irqsave(&n->list_lock, flags);
  762. list_for_each_entry(page, &n->partial, lru) {
  763. ret = get_each_object_track(s, page, TRACK_ALLOC,
  764. dump_tracking, &buf);
  765. if (buf.offset == buf.size - 1) {
  766. spin_unlock_irqrestore(&n->list_lock, flags);
  767. pr_err("slabowner minidump region exhausted\n");
  768. return;
  769. }
  770. }
  771. list_for_each_entry(page, &n->full, lru) {
  772. ret = get_each_object_track(s, page, TRACK_ALLOC,
  773. dump_tracking, &buf);
  774. if (buf.offset == buf.size - 1) {
  775. spin_unlock_irqrestore(&n->list_lock, flags);
  776. pr_err("slabowner minidump region exhausted\n");
  777. return;
  778. }
  779. }
  780. spin_unlock_irqrestore(&n->list_lock, flags);
  781. }
  782. ret = scnprintf(buf.buf, buf.size, "\n");
  783. if (ret == buf.size - 1)
  784. return;
  785. buf.buf += ret;
  786. }
  787. }
  788. static ssize_t slab_owner_dump_size_write(struct file *file,
  789. const char __user *ubuf,
  790. size_t count, loff_t *offset)
  791. {
  792. unsigned long long size;
  793. if (kstrtoull_from_user(ubuf, count, 0, &size)) {
  794. pr_err_ratelimited("Invalid format for size\n");
  795. return -EINVAL;
  796. }
  797. update_dump_size("SLABOWNER", size,
  798. &md_slabowner_dump_addr, &md_slabowner_dump_size);
  799. return count;
  800. }
  801. static ssize_t slab_owner_dump_size_read(struct file *file, char __user *ubuf,
  802. size_t count, loff_t *offset)
  803. {
  804. char buf[100];
  805. snprintf(buf, sizeof(buf), "%llu MB\n", md_slabowner_dump_size/SZ_1M);
  806. return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
  807. }
  808. static const struct file_operations proc_slab_owner_dump_size_ops = {
  809. .open = simple_open,
  810. .write = slab_owner_dump_size_write,
  811. .read = slab_owner_dump_size_read,
  812. };
  813. static ssize_t slab_owner_filter_write(struct file *file,
  814. const char __user *ubuf,
  815. size_t count, loff_t *offset)
  816. {
  817. unsigned long filter;
  818. int bit, i;
  819. struct kmem_cache *s;
  820. if (kstrtoul_from_user(ubuf, count, 0, &filter)) {
  821. pr_err_ratelimited("Invalid format for filter\n");
  822. return -EINVAL;
  823. }
  824. for (i = 0, bit = 1; filter >= bit; bit *= 2, i++) {
  825. if (filter & bit) {
  826. s = kmalloc_caches[KMALLOC_NORMAL][i];
  827. if (!s) {
  828. pr_err("Invalid filter : %lx kmalloc-%d doesn't exist\n",
  829. filter, bit);
  830. return -EINVAL;
  831. }
  832. }
  833. }
  834. slab_owner_filter = filter;
  835. return count;
  836. }
  837. static ssize_t slab_owner_filter_read(struct file *file, char __user *ubuf,
  838. size_t count, loff_t *offset)
  839. {
  840. char buf[64];
  841. snprintf(buf, sizeof(buf), "0x%lx\n", slab_owner_filter);
  842. return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
  843. }
  844. static const struct file_operations proc_slab_owner_filter_ops = {
  845. .open = simple_open,
  846. .write = slab_owner_filter_write,
  847. .read = slab_owner_filter_read,
  848. };
  849. static ssize_t slab_owner_handle_write(struct file *file,
  850. const char __user *ubuf,
  851. size_t count, loff_t *offset)
  852. {
  853. unsigned long size;
  854. if (kstrtoul_from_user(ubuf, count, 0, &size)) {
  855. pr_err_ratelimited("Invalid format for handle size\n");
  856. return -EINVAL;
  857. }
  858. if (size) {
  859. if (size > (md_slabowner_dump_size / SZ_16K)) {
  860. pr_err_ratelimited("size : %lu KB exceeds max size : %lu KB\n",
  861. size, (md_slabowner_dump_size / SZ_16K));
  862. goto err;
  863. }
  864. slab_owner_handles_size = size * SZ_1K;
  865. }
  866. err:
  867. return count;
  868. }
  869. static ssize_t slab_owner_handle_read(struct file *file, char __user *ubuf,
  870. size_t count, loff_t *offset)
  871. {
  872. char buf[64];
  873. snprintf(buf, sizeof(buf), "%lu KB\n",
  874. (slab_owner_handles_size / SZ_1K));
  875. return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
  876. }
  877. static const struct file_operations proc_slab_owner_handle_ops = {
  878. .open = simple_open,
  879. .write = slab_owner_handle_write,
  880. .read = slab_owner_handle_read,
  881. };
  882. void md_debugfs_slabowner(struct dentry *minidump_dir)
  883. {
  884. int i;
  885. debugfs_create_file("slab_owner_dump_size_mb", 0400, minidump_dir, NULL,
  886. &proc_slab_owner_dump_size_ops);
  887. debugfs_create_file("slab_owner_filter", 0400, minidump_dir, NULL,
  888. &proc_slab_owner_filter_ops);
  889. debugfs_create_file("slab_owner_handles_size_kb", 0400,
  890. minidump_dir, NULL, &proc_slab_owner_handle_ops);
  891. for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
  892. if (kmalloc_caches[KMALLOC_NORMAL][i])
  893. set_bit(i, &slab_owner_filter);
  894. }
  895. }
  896. #endif /* CONFIG_SLUB_DEBUG */
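/* Callback for dma_buf_get_each(): dump one dma-buf, its fences and attachments. */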
  897. static int dump_bufinfo(const struct dma_buf *buf_obj, void *private)
  898. {
  899. int ret;
  900. struct dma_buf_attachment *attach_obj;
  901. struct dma_resv *robj;
  902. struct dma_resv_iter cursor;
  903. struct dma_fence *fence;
  904. int attach_count;
  905. struct dma_buf_priv *buf = (struct dma_buf_priv *)private;
  906. struct priv_buf *priv_buf = buf->priv_buf;
  907. ret = dma_resv_lock(buf_obj->resv, NULL);
  908. if (ret)
  909. goto err;
  910. ret = scnprintf(priv_buf->buf + priv_buf->offset,
  911. priv_buf->size - priv_buf->offset,
  912. "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
  913. buf_obj->size,
  914. buf_obj->file->f_flags, buf_obj->file->f_mode,
  915. file_count(buf_obj->file),
  916. buf_obj->exp_name,
  917. file_inode(buf_obj->file)->i_ino,
  918. buf_obj->name ?: "");
  919. priv_buf->offset += ret;
  920. if (priv_buf->offset == priv_buf->size - 1)
  921. goto err;
  922. robj = buf_obj->resv;
  923. dma_resv_for_each_fence(&cursor, robj,
  924. DMA_RESV_USAGE_BOOKKEEP, fence) {
  925. if (!dma_fence_get_rcu(fence))
  926. continue;
  927. ret = scnprintf(priv_buf->buf + priv_buf->offset,
  928. priv_buf->size - priv_buf->offset,
  929. "\tFence: %s %s %ssignalled\n",
  930. fence->ops->get_driver_name(fence),
  931. fence->ops->get_timeline_name(fence),
  932. dma_fence_is_signaled(fence) ? "" : "un");
  933. priv_buf->offset += ret;
  934. if (priv_buf->offset == priv_buf->size - 1)
  935. goto err;
  936. dma_fence_put(fence);
  937. }
  938. ret = scnprintf(priv_buf->buf + priv_buf->offset,
  939. priv_buf->size - priv_buf->offset,
  940. "\tAttached Devices:\n");
  941. priv_buf->offset += ret;
  942. if (priv_buf->offset == priv_buf->size - 1)
  943. goto err;
  944. attach_count = 0;
  945. list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
  946. ret = scnprintf(priv_buf->buf + priv_buf->offset,
  947. priv_buf->size - priv_buf->offset,
  948. "\t%s\n", dev_name(attach_obj->dev));
  949. priv_buf->offset += ret;
  950. if (priv_buf->offset == priv_buf->size - 1)
  951. goto err;
  952. attach_count++;
  953. }
  954. dma_resv_unlock(buf_obj->resv);
  955. ret = scnprintf(priv_buf->buf + priv_buf->offset,
  956. priv_buf->size - priv_buf->offset,
  957. "Total %d devices attached\n\n",
  958. attach_count);
  959. priv_buf->offset += ret;
  960. if (priv_buf->offset == priv_buf->size - 1)
  961. goto err;
  962. buf->count += 1;
  963. buf->size += buf_obj->size;
  964. return 0;
  965. err:
  966. pr_err("DMABUF_INFO minidump region exhausted\n");
  967. return -ENOSPC;
  968. }
  969. void md_dma_buf_info(char *m, size_t dump_size)
  970. {
  971. int ret;
  972. struct dma_buf_priv dma_buf_priv;
  973. struct priv_buf buf;
  974. if (!in_task())
  975. return;
  976. buf.buf = m;
  977. buf.size = dump_size;
  978. buf.offset = 0;
  979. dma_buf_priv.priv_buf = &buf;
  980. dma_buf_priv.count = 0;
  981. dma_buf_priv.size = 0;
  982. ret = scnprintf(buf.buf, buf.size, "\nDma-buf Objects:\n");
  983. ret += scnprintf(buf.buf + ret, buf.size - ret,
  984. "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
  985. "size", "flags", "mode", "count", "ino");
  986. buf.offset = ret;
  987. dma_buf_get_each(dump_bufinfo, &dma_buf_priv);
  988. scnprintf(buf.buf + buf.offset, buf.size - buf.offset,
  989. "\nTotal %d objects, %zu bytes\n",
  990. dma_buf_priv.count, dma_buf_priv.size);
  991. }
  992. static ssize_t dma_buf_info_size_write(struct file *file,
  993. const char __user *ubuf,
  994. size_t count, loff_t *offset)
  995. {
  996. unsigned long long size;
  997. if (kstrtoull_from_user(ubuf, count, 0, &size)) {
  998. pr_err_ratelimited("Invalid format for size\n");
  999. return -EINVAL;
  1000. }
  1001. update_dump_size("DMA_INFO", size,
  1002. &md_dma_buf_info_addr, &md_dma_buf_info_size);
  1003. return count;
  1004. }
  1005. static ssize_t dma_buf_info_size_read(struct file *file, char __user *ubuf,
  1006. size_t count, loff_t *offset)
  1007. {
  1008. char buf[100];
  1009. snprintf(buf, sizeof(buf), "%llu MB\n", md_dma_buf_info_size/SZ_1M);
  1010. return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
  1011. }
  1012. static const struct file_operations proc_dma_buf_info_size_ops = {
  1013. .open = simple_open,
  1014. .write = dma_buf_info_size_write,
  1015. .read = dma_buf_info_size_read,
  1016. };
  1017. void md_debugfs_dmabufinfo(struct dentry *minidump_dir)
  1018. {
  1019. debugfs_create_file("dma_buf_info_size_mb", 0400, minidump_dir, NULL,
  1020. &proc_dma_buf_info_size_ops);
  1021. }
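/*
 * iterate_fd() callback: dump the dma-bufs held by a process. A hash bitmap of
 * the buffer contents is used so the same dma-buf is only reported once per
 * process.
 */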
  1022. static int get_dma_info(const void *data, struct file *file, unsigned int n)
  1023. {
  1024. struct priv_buf *buf;
  1025. struct dma_buf_priv *dma_buf_priv;
  1026. struct dma_buf *dmabuf;
  1027. struct task_struct *task;
  1028. int ret;
  1029. u32 index;
  1030. if (!qcom_is_dma_buf_file(file))
  1031. return 0;
  1032. dma_buf_priv = (struct dma_buf_priv *)data;
  1033. buf = dma_buf_priv->priv_buf;
  1034. task = dma_buf_priv->task;
  1035. if (dma_buf_priv->count == 0) {
  1036. ret = scnprintf(buf->buf + buf->offset, buf->size - buf->offset,
  1037. "\n%s (PID %d)\nDMA Buffers:\n",
  1038. task->comm, task->tgid);
  1039. buf->offset += ret;
  1040. if (buf->offset == buf->size - 1)
  1041. return -EINVAL;
  1042. }
  1043. dmabuf = (struct dma_buf *)file->private_data;
  1044. index = jhash(dmabuf, sizeof(struct dma_buf), DMA_BUF_HASH_SEED);
  1045. index = index & (DMA_BUF_HASH_SIZE - 1);
  1046. if (test_bit(index, dma_buf_hash))
  1047. return 0;
  1048. set_bit(index, dma_buf_hash);
  1049. dma_buf_priv->count += 1;
  1050. ret = scnprintf(buf->buf + buf->offset, buf->size - buf->offset,
  1051. "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
  1052. "size", "flags", "mode", "count", "ino");
  1053. buf->offset += ret;
  1054. if (buf->offset == buf->size - 1)
  1055. return -EINVAL;
  1056. ret = scnprintf(buf->buf + buf->offset, buf->size - buf->offset,
  1057. "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
  1058. dmabuf->size,
  1059. dmabuf->file->f_flags, dmabuf->file->f_mode,
  1060. file_count(dmabuf->file),
  1061. dmabuf->exp_name,
  1062. file_inode(dmabuf->file)->i_ino,
  1063. dmabuf->name ?: "");
  1064. buf->offset += ret;
  1065. if (buf->offset == buf->size - 1)
  1066. return -EINVAL;
  1067. dma_buf_priv->size += dmabuf->size;
  1068. return 0;
  1069. }
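/* Walk every process and thread and dump the dma-bufs referenced by its fd table. */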
  1070. void md_dma_buf_procs(char *m, size_t dump_size)
  1071. {
  1072. struct task_struct *task, *thread;
  1073. struct files_struct *files;
  1074. int ret = 0;
  1075. struct priv_buf buf;
  1076. struct dma_buf_priv dma_buf_priv;
  1077. buf.buf = m;
  1078. buf.size = dump_size;
  1079. buf.offset = 0;
  1080. dma_buf_priv.priv_buf = &buf;
  1081. dma_buf_priv.count = 0;
  1082. dma_buf_priv.size = 0;
  1083. rcu_read_lock();
  1084. for_each_process(task) {
  1085. struct files_struct *group_leader_files = NULL;
  1086. dma_buf_priv.task = task;
  1087. for_each_thread(task, thread) {
  1088. task_lock(thread);
  1089. if (unlikely(!group_leader_files))
  1090. group_leader_files = task->group_leader->files;
  1091. files = thread->files;
  1092. if (files && (group_leader_files != files ||
  1093. thread == task->group_leader))
  1094. ret = iterate_fd(files, 0, get_dma_info, &dma_buf_priv);
  1095. task_unlock(thread);
  1096. if (ret)
  1097. goto err;
  1098. }
  1099. if (dma_buf_priv.count) {
  1100. ret = scnprintf(buf.buf + buf.offset, buf.size - buf.offset,
  1101. "\nTotal %d objects, %zu bytes\n",
  1102. dma_buf_priv.count, dma_buf_priv.size);
  1103. buf.offset += ret;
  1104. if (buf.offset == buf.size - 1)
  1105. goto err;
  1106. dma_buf_priv.count = 0;
  1107. dma_buf_priv.size = 0;
  1108. bitmap_zero(dma_buf_hash, DMA_BUF_HASH_SIZE);
  1109. }
  1110. }
  1111. rcu_read_unlock();
  1112. return;
  1113. err:
  1114. rcu_read_unlock();
  1115. pr_err("DMABUF_PROCS Minidump region exhausted\n");
  1116. }
  1117. static ssize_t dma_buf_procs_size_write(struct file *file,
  1118. const char __user *ubuf,
  1119. size_t count, loff_t *offset)
  1120. {
  1121. unsigned long long size;
  1122. if (kstrtoull_from_user(ubuf, count, 0, &size)) {
  1123. pr_err_ratelimited("Invalid format for size\n");
  1124. return -EINVAL;
  1125. }
  1126. update_dump_size("DMA_PROC", size,
  1127. &md_dma_buf_procs_addr, &md_dma_buf_procs_size);
  1128. return count;
  1129. }
  1130. static ssize_t dma_buf_procs_size_read(struct file *file, char __user *ubuf,
  1131. size_t count, loff_t *offset)
  1132. {
  1133. char buf[100];
  1134. snprintf(buf, sizeof(buf), "%llu MB\n", md_dma_buf_procs_size/SZ_1M);
  1135. return simple_read_from_buffer(ubuf, count, offset, buf, strlen(buf));
  1136. }
  1137. static const struct file_operations proc_dma_buf_procs_size_ops = {
  1138. .open = simple_open,
  1139. .write = dma_buf_procs_size_write,
  1140. .read = dma_buf_procs_size_read,
  1141. };
  1142. void md_debugfs_dmabufprocs(struct dentry *minidump_dir)
  1143. {
  1144. debugfs_create_file("dma_buf_procs_size_mb", 0400, minidump_dir, NULL,
  1145. &proc_dma_buf_procs_size_ops);
  1146. }
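/* Resolve a non-exported kernel symbol into md_debug_<var>; flag an error if missing. */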
  1147. #define MD_DEBUG_LOOKUP(_var, type) \
  1148. do { \
  1149. md_debug_##_var = (type *)DEBUG_SYMBOL_LOOKUP(_var); \
  1150. if (!md_debug_##_var) { \
  1151. pr_err("minidump: %s symbol not available in vmlinux\n", #_var); \
  1152. error |= 1; \
  1153. } \
  1154. } while (0)
  1155. int md_minidump_memory_init(void)
  1156. {
  1157. int error = 0;
  1158. MD_DEBUG_LOOKUP(totalcma_pages, unsigned long);
  1159. MD_DEBUG_LOOKUP(slab_caches, struct list_head);
  1160. MD_DEBUG_LOOKUP(slab_mutex, struct mutex);
  1161. MD_DEBUG_LOOKUP(page_owner_inited, struct static_key);
  1162. MD_DEBUG_LOOKUP(slub_debug_enabled, struct static_key);
  1163. MD_DEBUG_LOOKUP(min_low_pfn, unsigned long);
  1164. MD_DEBUG_LOOKUP(max_pfn, unsigned long);
  1165. return error;
  1166. }