qcom_va_minidump.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "va-minidump: %s: " fmt, __func__

#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
#include <linux/elf.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/panic_notifier.h>
#include <soc/qcom/minidump.h>

#include "elf.h"
struct va_md_tree_node {
	struct va_md_entry entry;
	int lindex;
	int rindex;
};

struct va_md_elf_info {
	unsigned long ehdr;
	unsigned long shdr_cnt;
	unsigned long phdr_cnt;
	unsigned long pload_size;
	unsigned long str_tbl_size;
};

#define VA_MD_VADDR_MARKER	-1
#define VA_MD_CB_MARKER		-2
#define MAX_ELF_SECTION		0xFFFFU

struct va_minidump_data {
	phys_addr_t mem_phys_addr;
	unsigned int total_mem_size;
	unsigned long elf_mem;
	unsigned int num_sections;
	unsigned long str_tbl_idx;
	struct va_md_elf_info elf;
	struct md_region md_entry;
	bool in_oops_handler;
	bool va_md_minidump_reg;
	bool va_md_init;
	struct list_head va_md_list;
	struct kset *va_md_kset;
};
/*
 * In case the client allocates its notifier block on the stack,
 * we have to make a copy.
 */
struct notifier_block_list {
	struct notifier_block nb;
	struct list_head nb_list;
};

struct va_md_s_data {
	struct kobject s_kobj;
	struct atomic_notifier_head va_md_s_notif_list;
	struct list_head va_md_s_list;
	struct list_head va_md_s_nb_list;
	bool enable;
};
struct va_minidump_data va_md_data;
static DEFINE_MUTEX(va_md_lock);

#define to_va_md_attr(_attr) container_of(_attr, struct va_md_attribute, attr)
#define to_va_md_s_data(obj) container_of(obj, struct va_md_s_data, s_kobj)

struct va_md_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
			 const char *buf, size_t count);
};

static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
			 char *buf)
{
	struct va_md_attribute *va_md_attr = to_va_md_attr(attr);
	ssize_t ret = -EIO;

	if (va_md_attr->show)
		ret = va_md_attr->show(kobj, attr, buf);

	return ret;
}

static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
			  const char *buf, size_t count)
{
	struct va_md_attribute *va_md_attr = to_va_md_attr(attr);
	ssize_t ret = -EIO;

	if (va_md_attr->store)
		ret = va_md_attr->store(kobj, attr, buf, count);

	return ret;
}

static const struct sysfs_ops va_md_sysfs_ops = {
	.show = attr_show,
	.store = attr_store,
};

static struct kobj_type va_md_kobj_type = {
	.sysfs_ops = &va_md_sysfs_ops,
};
static ssize_t enable_show(struct kobject *kobj, struct attribute *this, char *buf)
{
	struct va_md_s_data *vamd_sdata = to_va_md_s_data(kobj);

	return scnprintf(buf, PAGE_SIZE, "enable: %u\n", vamd_sdata->enable);
}

static ssize_t enable_store(struct kobject *kobj, struct attribute *this,
			    const char *buf, size_t count)
{
	struct va_md_s_data *vamd_sdata = to_va_md_s_data(kobj);
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret) {
		pr_err("Invalid value passed\n");
		return ret;
	}

	vamd_sdata->enable = val;
	return count;
}

static struct va_md_attribute va_md_s_attr = __ATTR_RW(enable);

static struct attribute *va_md_s_attrs[] = {
	&va_md_s_attr.attr,
	NULL
};

static struct attribute_group va_md_s_attr_group = {
	.attrs = va_md_s_attrs,
};
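
/*
 * qcom_va_md_enabled() - check whether VA minidump is ready for clients.
 *
 * Returns true only when the core minidump facility is enabled and this
 * driver has finished probing (paired with the smp_store_release() at the
 * end of probe).
 */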
bool qcom_va_md_enabled(void)
{
	/*
	 * Ensure that minidump is enabled and va-minidump is initialized
	 * before we start registration.
	 */
	return msm_minidump_enabled() && smp_load_acquire(&va_md_data.va_md_init);
}
EXPORT_SYMBOL(qcom_va_md_enabled);
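
/*
 * qcom_va_md_register() - register a client notifier under a named subsystem.
 *
 * The first registration for a given name creates a kobject under
 * /sys/kernel/va-minidump/<name>; later callers with the same name only add
 * their notifier. The notifier block is copied, so callers may pass one that
 * lives on the stack. Collection is gated per subsystem by the "enable"
 * sysfs attribute, and the notifier runs from the panic path where it should
 * add its regions via qcom_va_md_add_region().
 */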
int qcom_va_md_register(const char *name, struct notifier_block *nb)
{
	int ret = 0;
	struct va_md_s_data *va_md_s_data;
	struct notifier_block_list *nbl, *temp_nbl;
	struct kobject *kobj;

	if (!qcom_va_md_enabled()) {
		pr_err("qcom va minidump driver is not initialized\n");
		return -ENODEV;
	}

	nbl = kzalloc(sizeof(struct notifier_block_list), GFP_KERNEL);
	if (!nbl)
		return -ENOMEM;

	nbl->nb = *nb;
	mutex_lock(&va_md_lock);
	kobj = kset_find_obj(va_md_data.va_md_kset, name);
	if (kobj) {
		pr_warn("subsystem: %s is already registered\n", name);
		kobject_put(kobj);
		va_md_s_data = to_va_md_s_data(kobj);
		goto register_notifier;
	}

	va_md_s_data = kzalloc(sizeof(*va_md_s_data), GFP_KERNEL);
	if (!va_md_s_data) {
		ret = -ENOMEM;
		kfree(nbl);
		goto out;
	}

	va_md_s_data->s_kobj.kset = va_md_data.va_md_kset;
	ret = kobject_init_and_add(&va_md_s_data->s_kobj, &va_md_kobj_type,
				   &va_md_data.va_md_kset->kobj, name);
	if (ret) {
		pr_err("Error in kobject creation\n");
		kobject_put(&va_md_s_data->s_kobj);
		kfree(nbl);
		goto out;
	}

	kobject_uevent(&va_md_s_data->s_kobj, KOBJ_ADD);
	ret = sysfs_create_group(&va_md_s_data->s_kobj, &va_md_s_attr_group);
	if (ret) {
		pr_err("Error in creating sysfs group\n");
		kobject_put(&va_md_s_data->s_kobj);
		kfree(nbl);
		goto out;
	}

	ATOMIC_INIT_NOTIFIER_HEAD(&va_md_s_data->va_md_s_notif_list);
	INIT_LIST_HEAD(&va_md_s_data->va_md_s_nb_list);
	va_md_s_data->enable = false;
	list_add_tail(&va_md_s_data->va_md_s_list, &va_md_data.va_md_list);

register_notifier:
	list_for_each_entry(temp_nbl, &va_md_s_data->va_md_s_nb_list, nb_list) {
		if (temp_nbl->nb.notifier_call == nbl->nb.notifier_call) {
			pr_warn("subsystem:%s callback is already registered\n", name);
			kfree(nbl);
			ret = -EEXIST;
			goto out;
		}
	}

	atomic_notifier_chain_register(&va_md_s_data->va_md_s_notif_list, &nbl->nb);
	list_add_tail(&nbl->nb_list, &va_md_s_data->va_md_s_nb_list);
out:
	mutex_unlock(&va_md_lock);
	return ret;
}
EXPORT_SYMBOL(qcom_va_md_register);
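
/*
 * qcom_va_md_unregister() - remove a client notifier from a subsystem.
 *
 * When the last notifier of a subsystem is removed, its sysfs directory and
 * bookkeeping are torn down as well.
 */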
int qcom_va_md_unregister(const char *name, struct notifier_block *nb)
{
	struct va_md_s_data *va_md_s_data;
	struct notifier_block_list *nbl, *tmpnbl;
	struct kobject *kobj;
	int ret = 0;
	bool found = false;

	if (!qcom_va_md_enabled()) {
		pr_err("qcom va minidump driver is not initialized\n");
		return -ENODEV;
	}

	mutex_lock(&va_md_lock);
	kobj = kset_find_obj(va_md_data.va_md_kset, name);
	if (!kobj) {
		pr_warn("subsystem: %s is not registered\n", name);
		mutex_unlock(&va_md_lock);
		return -EINVAL;
	}

	va_md_s_data = to_va_md_s_data(kobj);
	kobject_put(kobj);
	list_for_each_entry_safe(nbl, tmpnbl, &va_md_s_data->va_md_s_nb_list, nb_list) {
		if (nbl->nb.notifier_call == nb->notifier_call) {
			atomic_notifier_chain_unregister(&va_md_s_data->va_md_s_notif_list,
							 &nbl->nb);
			list_del(&nbl->nb_list);
			kfree(nbl);
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("subsystem:%s callback is not registered\n", name);
		ret = -EINVAL;
	} else if (list_empty(&va_md_s_data->va_md_s_nb_list)) {
		list_del(&va_md_s_data->va_md_s_nb_list);
		sysfs_remove_group(&va_md_s_data->s_kobj, &va_md_s_attr_group);
		kobject_put(&va_md_s_data->s_kobj);
		list_del(&va_md_s_data->va_md_s_list);
		kfree(va_md_s_data);
	}

	mutex_unlock(&va_md_lock);
	return ret;
}
EXPORT_SYMBOL(qcom_va_md_unregister);
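
/*
 * Entries are stored as an array of va_md_tree_node at the start of the
 * reserved region. The lindex/rindex markers record whether a node describes
 * a VA-backed range or a callback-only entry until real child links are
 * assigned by the tree insertion below.
 */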
static void va_md_add_entry(struct va_md_entry *entry)
{
	struct va_md_tree_node *dst = ((struct va_md_tree_node *)va_md_data.elf_mem) +
				       va_md_data.num_sections;
	unsigned int len = strlen(entry->owner);

	dst->entry = *entry;
	WARN_ONCE(len > MAX_OWNER_STRING - 1,
		  "Client entry name %s (len = %u) is greater than expected %u\n",
		  entry->owner, len, MAX_OWNER_STRING - 1);
	dst->entry.owner[MAX_OWNER_STRING - 1] = '\0';

	if (entry->vaddr) {
		dst->lindex = VA_MD_VADDR_MARKER;
		dst->rindex = VA_MD_VADDR_MARKER;
	} else {
		dst->lindex = VA_MD_CB_MARKER;
		dst->rindex = VA_MD_CB_MARKER;
	}

	va_md_data.num_sections++;
}
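
/*
 * Helpers for walking the binary search tree of VA-backed entries:
 * va_md_check_overlap() detects any intersection between a new entry and an
 * existing node, while va_md_move_left()/va_md_move_right() report on which
 * side of the node the new range falls entirely.
 */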
static bool va_md_check_overlap(struct va_md_entry *entry, unsigned int index)
{
	unsigned long ent_start, ent_end;
	unsigned long node_start, node_end;
	struct va_md_tree_node *node = (struct va_md_tree_node *)va_md_data.elf_mem;

	node_start = node[index].entry.vaddr;
	node_end = node[index].entry.vaddr + node[index].entry.size - 1;
	ent_start = entry->vaddr;
	ent_end = entry->vaddr + entry->size - 1;

	if (((node_start <= ent_start) && (ent_start <= node_end)) ||
	    ((node_start <= ent_end) && (ent_end <= node_end)) ||
	    ((ent_start <= node_start) && (node_end <= ent_end)))
		return true;

	return false;
}

static bool va_md_move_left(struct va_md_entry *entry, unsigned int index)
{
	unsigned long ent_start, ent_end;
	unsigned long node_start, node_end;
	struct va_md_tree_node *node = (struct va_md_tree_node *)va_md_data.elf_mem;

	node_start = node[index].entry.vaddr;
	node_end = node[index].entry.vaddr + node[index].entry.size - 1;
	ent_start = entry->vaddr;
	ent_end = entry->vaddr + entry->size - 1;

	if ((ent_start < node_start) && (ent_end < node_start))
		return true;

	return false;
}

static bool va_md_move_right(struct va_md_entry *entry, unsigned int index)
{
	unsigned long ent_start, ent_end;
	unsigned long node_start, node_end;
	struct va_md_tree_node *node = (struct va_md_tree_node *)va_md_data.elf_mem;

	node_start = node[index].entry.vaddr;
	node_end = node[index].entry.vaddr + node[index].entry.size - 1;
	ent_start = entry->vaddr;
	ent_end = entry->vaddr + entry->size - 1;

	if ((ent_start > node_end) && (ent_end > node_end))
		return true;

	return false;
}
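
/*
 * Insert a client entry. Callback-only entries (no vaddr) are simply
 * appended; VA-backed entries are placed in the binary search tree so that
 * overlapping address ranges can be rejected with -EINVAL.
 */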
static int va_md_tree_insert(struct va_md_entry *entry)
{
	unsigned int baseindex = 0;
	int ret = 0;
	static int num_nodes;
	struct va_md_tree_node *tree = (struct va_md_tree_node *)va_md_data.elf_mem;

	if (!entry->vaddr || !va_md_data.num_sections) {
		va_md_add_entry(entry);
		goto out;
	}

	while (baseindex < va_md_data.num_sections) {
		if ((tree[baseindex].lindex == VA_MD_CB_MARKER) &&
		    (tree[baseindex].rindex == VA_MD_CB_MARKER)) {
			baseindex++;
			continue;
		}

		if (va_md_check_overlap(entry, baseindex)) {
			entry->owner[MAX_OWNER_STRING - 1] = '\0';
			pr_err("Overlapping region owner:%s\n", entry->owner);
			ret = -EINVAL;
			goto out;
		}

		if (va_md_move_left(entry, baseindex)) {
			if (tree[baseindex].lindex == VA_MD_VADDR_MARKER) {
				tree[baseindex].lindex = va_md_data.num_sections;
				va_md_add_entry(entry);
				num_nodes++;
				goto exit_loop;
			} else {
				baseindex = tree[baseindex].lindex;
				continue;
			}
		} else if (va_md_move_right(entry, baseindex)) {
			if (tree[baseindex].rindex == VA_MD_VADDR_MARKER) {
				tree[baseindex].rindex = va_md_data.num_sections;
				va_md_add_entry(entry);
				num_nodes++;
				goto exit_loop;
			} else {
				baseindex = tree[baseindex].rindex;
				continue;
			}
		} else {
			pr_err("Warning: Corrupted Binary Search Tree\n");
		}
	}

exit_loop:
	if (!num_nodes) {
		va_md_add_entry(entry);
		num_nodes++;
	}

out:
	return ret;
}
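
/* Check whether one more tree node would still fit in the reserved region. */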
static bool va_md_overflow_check(void)
{
	unsigned long end_addr;
	unsigned long start_addr = va_md_data.elf_mem;

	start_addr += sizeof(struct va_md_tree_node) * va_md_data.num_sections;
	end_addr = start_addr + sizeof(struct va_md_tree_node) - 1;

	if (end_addr > va_md_data.elf_mem + va_md_data.total_mem_size - 1)
		return true;
	else
		return false;
}
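
/*
 * qcom_va_md_add_region() - add one dump region from a client notifier.
 *
 * Only valid while the panic notifier chain is running. An entry must set
 * either vaddr (the region is copied as-is) or cb (the callback fills the
 * region at dump time), never both, together with a non-zero size and an
 * owner string that becomes the ELF section name.
 *
 * Illustrative sketch of a client callback (names are hypothetical):
 *
 *	static int foo_va_md_notify(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		struct va_md_entry entry = {};
 *
 *		strscpy(entry.owner, "foo_state", sizeof(entry.owner));
 *		entry.vaddr = (unsigned long)foo_state;
 *		entry.size = sizeof(*foo_state);
 *		qcom_va_md_add_region(&entry);
 *		return NOTIFY_OK;
 *	}
 *
 * with the notifier registered earlier via qcom_va_md_register("foo", ...).
 */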
int qcom_va_md_add_region(struct va_md_entry *entry)
{
	if (!va_md_data.in_oops_handler)
		return -EINVAL;

	if ((!entry->vaddr == !entry->cb) || (entry->size <= 0)) {
		entry->owner[MAX_OWNER_STRING - 1] = '\0';
		pr_err("Invalid entry from owner:%s\n", entry->owner);
		return -EINVAL;
	}

	if (va_md_data.num_sections > MAX_ELF_SECTION) {
		pr_err("MAX_ELF_SECTION reached\n");
		return -ENOSPC;
	}

	if (va_md_overflow_check()) {
		pr_err("Total CMA consumed for Qcom VA minidump\n");
		return -ENOMEM;
	}

	return va_md_tree_insert(entry);
}
EXPORT_SYMBOL(qcom_va_md_add_region);
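
/*
 * Register the ELF area (everything after the tree-node array) with the core
 * minidump driver as a single "KVA_DUMP" region.
 */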
static void qcom_va_md_minidump_registration(void)
{
	strscpy(va_md_data.md_entry.name, "KVA_DUMP", sizeof(va_md_data.md_entry.name));
	va_md_data.md_entry.virt_addr = va_md_data.elf.ehdr;
	va_md_data.md_entry.phys_addr = va_md_data.mem_phys_addr +
		(sizeof(struct va_md_tree_node) * va_md_data.num_sections);
	va_md_data.md_entry.size = sizeof(struct elfhdr) +
		(sizeof(struct elf_shdr) * va_md_data.elf.shdr_cnt) +
		(sizeof(struct elf_phdr) * va_md_data.elf.phdr_cnt) +
		va_md_data.elf.pload_size + va_md_data.elf.str_tbl_size;
	va_md_data.md_entry.size = ALIGN(va_md_data.md_entry.size, 4);

	if (msm_minidump_add_region(&va_md_data.md_entry) < 0) {
		pr_err("Failed to register VA driver CMA region with minidump\n");
		va_md_data.va_md_minidump_reg = false;
		return;
	}

	va_md_data.va_md_minidump_reg = true;
}
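
/* Append a name to the ELF string table and return its string-table index. */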
static inline unsigned long set_sec_name(struct elfhdr *ehdr, const char *name)
{
	char *strtab = elf_str_table(ehdr);
	unsigned long idx = va_md_data.str_tbl_idx;
	unsigned long ret = 0;

	if ((strtab == NULL) || (name == NULL))
		return 0;

	ret = idx;
	idx += strscpy((strtab + idx), name, MAX_OWNER_STRING);
	va_md_data.str_tbl_idx = idx + 1;
	return ret;
}
static void qcom_va_add_elf_hdr(void)
{
	struct elfhdr *ehdr = (struct elfhdr *)va_md_data.elf.ehdr;
	unsigned long phdr_off;

	phdr_off = sizeof(*ehdr) + (sizeof(struct elf_shdr) * va_md_data.elf.shdr_cnt);

	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_ehsize = sizeof(*ehdr);
	ehdr->e_shoff = sizeof(*ehdr);
	ehdr->e_shentsize = sizeof(struct elf_shdr);
	ehdr->e_shstrndx = 1;
	ehdr->e_phentsize = sizeof(struct elf_phdr);
	ehdr->e_phoff = phdr_off;
}
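
/*
 * Emit the remaining ELF metadata and payload. The layout inside the ELF
 * area is: ELF header, section headers (NULL + string table + one per client
 * entry), program headers, string table, then the payloads. VA-backed
 * entries are memcpy'd from their kernel address; callback entries are
 * filled by invoking the client's cb() on the destination buffer.
 */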
static void qcom_va_add_hdrs(void)
{
	struct elf_shdr *shdr;
	struct elf_phdr *phdr;
	unsigned long strtbl_off, offset, i;
	struct elfhdr *ehdr = (struct elfhdr *)va_md_data.elf.ehdr;
	struct va_md_tree_node *arr = (struct va_md_tree_node *)va_md_data.elf_mem;

	strtbl_off = ehdr->e_phoff + (sizeof(*phdr) * va_md_data.elf.phdr_cnt);

	/* First section header is NULL */
	shdr = elf_section(ehdr, ehdr->e_shnum);
	ehdr->e_shnum++;

	/* String table section */
	va_md_data.str_tbl_idx = 1;
	shdr = elf_section(ehdr, ehdr->e_shnum);
	ehdr->e_shnum++;
	shdr->sh_type = SHT_STRTAB;
	shdr->sh_offset = strtbl_off;
	shdr->sh_name = set_sec_name(ehdr, "STR_TBL");
	shdr->sh_size = va_md_data.elf.str_tbl_size;

	offset = strtbl_off + va_md_data.elf.str_tbl_size;
	for (i = 0; i < (va_md_data.elf.shdr_cnt - 2); i++) {
		/* section header */
		shdr = elf_section(ehdr, ehdr->e_shnum);
		shdr->sh_type = SHT_PROGBITS;
		shdr->sh_name = set_sec_name(ehdr, arr[i].entry.owner);
		shdr->sh_size = arr[i].entry.size;
		shdr->sh_flags = SHF_WRITE;
		shdr->sh_offset = offset;

		/* program header */
		phdr = elf_program(ehdr, ehdr->e_phnum);
		phdr->p_type = PT_LOAD;
		phdr->p_offset = offset;
		phdr->p_filesz = phdr->p_memsz = arr[i].entry.size;
		phdr->p_flags = PF_R | PF_W;

		if (arr[i].entry.vaddr) {
			shdr->sh_addr = phdr->p_vaddr = arr[i].entry.vaddr;
			memcpy((void *)(va_md_data.elf.ehdr + offset),
			       (void *)shdr->sh_addr, shdr->sh_size);
		} else {
			shdr->sh_addr = phdr->p_vaddr = va_md_data.elf.ehdr + offset;
			arr[i].entry.cb((void *)(va_md_data.elf.ehdr + offset),
					shdr->sh_size);
		}

		offset += shdr->sh_size;
		ehdr->e_shnum++;
		ehdr->e_phnum++;
	}
}
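
/*
 * Account the space one client entry adds to the ELF (its section and
 * program header, payload, and string-table name). The first call also
 * reserves room for the ELF header, the NULL and string-table sections, and
 * places the ELF start right after the tree-node array.
 */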
static int qcom_va_md_calc_size(unsigned int shdr_cnt)
{
	unsigned int len, size = 0;
	static unsigned long tot_size;
	struct va_md_tree_node *arr = (struct va_md_tree_node *)va_md_data.elf_mem;

	if (!shdr_cnt) {
		tot_size = sizeof(struct va_md_tree_node) * va_md_data.num_sections;
		size = (sizeof(struct elfhdr) + (2 * sizeof(struct elf_shdr)) +
			strlen("STR_TBL") + 2);
	}

	len = strlen(arr[shdr_cnt].entry.owner);
	size += (sizeof(struct elf_shdr) + sizeof(struct elf_phdr) +
		 arr[shdr_cnt].entry.size + len + 1);
	tot_size += size;
	if (tot_size > va_md_data.total_mem_size) {
		pr_err("Total CMA consumed, no space left\n");
		return -ENOSPC;
	}

	if (!shdr_cnt) {
		va_md_data.elf.ehdr = va_md_data.elf_mem + (sizeof(struct va_md_tree_node)
							    * va_md_data.num_sections);
		va_md_data.elf.shdr_cnt = 2;
		va_md_data.elf.phdr_cnt = 0;
		va_md_data.elf.pload_size = 0;
		va_md_data.elf.str_tbl_size = strlen("STR_TBL") + 2;
	}

	va_md_data.elf.shdr_cnt++;
	va_md_data.elf.phdr_cnt++;
	va_md_data.elf.pload_size += arr[shdr_cnt].entry.size;
	va_md_data.elf.str_tbl_size += (len + 1);
	return 0;
}
static int qcom_va_md_calc_elf_size(void)
{
	unsigned int i;
	int ret = 0;

	if (va_md_overflow_check()) {
		pr_err("Total CMA consumed, no space to create ELF\n");
		return -ENOSPC;
	}

	pr_debug("Num sections:%u\n", va_md_data.num_sections);
	for (i = 0; i < va_md_data.num_sections; i++) {
		ret = qcom_va_md_calc_size(i);
		if (ret < 0)
			break;
	}

	return ret;
}
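
/*
 * First-stage panic notifier: invoke every enabled subsystem's notifier
 * chain so clients can add their regions, size the resulting ELF, zero the
 * ELF area, and register it with the core minidump driver.
 */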
static int qcom_va_md_panic_handler(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	unsigned long size;
	struct va_md_s_data *va_md_s_data;

	if (va_md_data.in_oops_handler)
		return NOTIFY_DONE;

	va_md_data.in_oops_handler = true;
	list_for_each_entry(va_md_s_data, &va_md_data.va_md_list, va_md_s_list) {
		if (va_md_s_data->enable)
			atomic_notifier_call_chain(&va_md_s_data->va_md_s_notif_list,
						   0, NULL);
	}

	if (!va_md_data.num_sections)
		goto out;

	if (qcom_va_md_calc_elf_size() < 0)
		goto out;

	size = sizeof(struct elfhdr) +
	       (sizeof(struct elf_shdr) * va_md_data.elf.shdr_cnt) +
	       (sizeof(struct elf_phdr) * va_md_data.elf.phdr_cnt) +
	       va_md_data.elf.pload_size + va_md_data.elf.str_tbl_size;
	size = ALIGN(size, 4);
	memset((void *)va_md_data.elf.ehdr, 0, size);
	qcom_va_md_minidump_registration();
out:
	return NOTIFY_DONE;
}
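
/*
 * Second-stage panic notifier (lower priority): actually write the ELF
 * header, section/program headers, and payloads into the registered region.
 */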
static int qcom_va_md_elf_panic_handler(struct notifier_block *this,
					unsigned long event, void *ptr)
{
	if (!va_md_data.num_sections || !va_md_data.va_md_minidump_reg)
		goto out;

	qcom_va_add_elf_hdr();
	qcom_va_add_hdrs();
out:
	va_md_data.in_oops_handler = false;
	return NOTIFY_DONE;
}

static struct notifier_block qcom_va_md_panic_blk = {
	.notifier_call = qcom_va_md_panic_handler,
	.priority = INT_MAX - 3,
};

static struct notifier_block qcom_va_md_elf_panic_blk = {
	.notifier_call = qcom_va_md_elf_panic_handler,
	.priority = INT_MAX - 4,
};
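
/*
 * Look up the "memory-region" phandle, initialize the device's reserved
 * memory, and take the usable length from the second cell of the reserved
 * node's "size" property.
 */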
static int qcom_va_md_reserve_mem(struct device *dev)
{
	struct device_node *node;
	unsigned int size[2];
	int ret = 0;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (node) {
		ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
		if (ret) {
			pr_err("Failed to initialize CMA mem, ret %d\n", ret);
			goto out;
		}
	}

	ret = of_property_read_u32_array(node, "size", size, 2);
	if (ret) {
		pr_err("Failed to get size of CMA, ret %d\n", ret);
		goto out;
	}

	va_md_data.total_mem_size = size[1];
out:
	of_node_put(node);
	return ret;
}
static int qcom_va_md_driver_remove(struct platform_device *pdev)
{
	struct va_md_s_data *va_md_s_data, *tmp;
	struct notifier_block_list *nbl, *tmpnbl;

	mutex_lock(&va_md_lock);
	list_for_each_entry_safe(va_md_s_data, tmp, &va_md_data.va_md_list, va_md_s_list) {
		list_for_each_entry_safe(nbl, tmpnbl, &va_md_s_data->va_md_s_nb_list, nb_list) {
			atomic_notifier_chain_unregister(&va_md_s_data->va_md_s_notif_list,
							 &nbl->nb);
			list_del(&nbl->nb_list);
			kfree(nbl);
		}
		list_del(&va_md_s_data->va_md_s_nb_list);
		sysfs_remove_group(&va_md_s_data->s_kobj, &va_md_s_attr_group);
		kobject_put(&va_md_s_data->s_kobj);
		list_del(&va_md_s_data->va_md_s_list);
		kfree(va_md_s_data);
	}
	mutex_unlock(&va_md_lock);

	kset_unregister(va_md_data.va_md_kset);
	atomic_notifier_chain_unregister(&panic_notifier_list, &qcom_va_md_elf_panic_blk);
	atomic_notifier_chain_unregister(&panic_notifier_list, &qcom_va_md_panic_blk);
	vunmap((void *)va_md_data.elf_mem);
	return 0;
}
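
/*
 * Probe: claim the reserved CMA area once with dma_alloc_coherent() to learn
 * its pages and physical address, release the coherent mapping again, and
 * vmap() those pages so the panic handlers can build the ELF in that region.
 * Finally hook both panic notifiers and create the /sys/kernel/va-minidump
 * kset.
 */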
static int qcom_va_md_driver_probe(struct platform_device *pdev)
{
	int ret = 0;
	int i;
	void *vaddr;
	int count;
	struct page **pages, *page;
	dma_addr_t dma_handle;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	ret = qcom_va_md_reserve_mem(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "CMA for VA based minidump is not present\n");
		goto out;
	}

	vaddr = dma_alloc_coherent(&pdev->dev, va_md_data.total_mem_size, &dma_handle,
				   GFP_KERNEL);
	if (!vaddr) {
		ret = -ENOMEM;
		goto out;
	}

	dma_free_coherent(&pdev->dev, va_md_data.total_mem_size, vaddr, dma_handle);
	page = phys_to_page(dma_to_phys(&pdev->dev, dma_handle));
	count = PAGE_ALIGN(va_md_data.total_mem_size) >> PAGE_SHIFT;
	pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);

	vaddr = vmap(pages, count, VM_DMA_COHERENT, pgprot_dmacoherent(PAGE_KERNEL));
	kfree(pages);
	if (!vaddr) {
		ret = -ENOMEM;
		goto out;
	}

	va_md_data.mem_phys_addr = dma_to_phys(&pdev->dev, dma_handle);
	va_md_data.elf_mem = (unsigned long)vaddr;

	atomic_notifier_chain_register(&panic_notifier_list, &qcom_va_md_panic_blk);
	atomic_notifier_chain_register(&panic_notifier_list, &qcom_va_md_elf_panic_blk);

	INIT_LIST_HEAD(&va_md_data.va_md_list);
	va_md_data.va_md_kset = kset_create_and_add("va-minidump", NULL, kernel_kobj);
	if (!va_md_data.va_md_kset) {
		dev_err(&pdev->dev, "Failed to create kset for va-minidump\n");
		vunmap((void *)va_md_data.elf_mem);
		ret = -ENOMEM;
		goto out;
	}

	/* All updates above should be visible, before init completes */
	smp_store_release(&va_md_data.va_md_init, true);
out:
	return ret;
}
static const struct of_device_id qcom_va_md_of_match[] = {
	{.compatible = "qcom,va-minidump"},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_va_md_of_match);

static struct platform_driver qcom_va_md_driver = {
	.driver = {
		.name = "qcom-va-minidump",
		.of_match_table = qcom_va_md_of_match,
	},
	.probe = qcom_va_md_driver_probe,
	.remove = qcom_va_md_driver_remove,
};
module_platform_driver(qcom_va_md_driver);

MODULE_DESCRIPTION("Qcom VA Minidump Driver");
MODULE_LICENSE("GPL v2");