dmesg_dumper.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kmsg_dump.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/gunyah/gh_dbl.h>
#include <linux/gunyah/gh_panic_notifier.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/qcom_scm.h>
#include <soc/qcom/secure_buffer.h>

#include "dmesg_dumper_private.h"

#define DDUMP_DBL_MASK			0x1
#define DDUMP_PROFS_NAME		"vmkmsg"
#define DDUMP_WAIT_WAKEIRQ_TIMEOUT	msecs_to_jiffies(1000)

static bool vm_status_ready;
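
/*
 * kmsg_dump callback: copy the local kernel log buffer into the shared
 * memory region so the peer VM can retrieve it after a dump or panic.
 */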
static void qcom_ddump_to_shm(struct kmsg_dumper *dumper,
			      enum kmsg_dump_reason reason)
{
	struct qcom_dmesg_dumper *qdd = container_of(dumper,
					struct qcom_dmesg_dumper, dump);
	size_t len;

	dev_warn(qdd->dev, "reason = %d\n", reason);
	kmsg_dump_rewind(&qdd->iter);
	memset(qdd->base, 0, qdd->size);
	kmsg_dump_get_buffer(&qdd->iter, false, qdd->base, qdd->size, &len);
	dev_warn(qdd->dev, "size of dmesg logbuf logged = %zu\n", len);
}
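
/*
 * Gunyah panic notifier (SVM side): on a panic notification, dump the
 * kernel log into shared memory as a KMSG_DUMP_PANIC.
 */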
static int qcom_ddump_gh_panic_handler(struct notifier_block *nb,
				       unsigned long cmd, void *data)
{
	struct qcom_dmesg_dumper *qdd;

	qdd = container_of(nb, struct qcom_dmesg_dumper, gh_panic_nb);
	qcom_ddump_to_shm(&qdd->dump, KMSG_DUMP_PANIC);

	return NOTIFY_DONE;
}
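
/*
 * On the secondary VM the shared-memory description lives in a separate
 * "qcom,ddump-gunyah-gen" node; match it against this instance by its
 * "qcom,label" property.
 */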
static struct device_node *qcom_ddump_svm_of_parse(struct qcom_dmesg_dumper *qdd)
{
	const char *compat = "qcom,ddump-gunyah-gen";
	struct device_node *np = NULL;
	u32 label;
	int ret;

	while ((np = of_find_compatible_node(np, NULL, compat))) {
		ret = of_property_read_u32(np, "qcom,label", &label);
		if (ret) {
			of_node_put(np);
			continue;
		}

		if (label == qdd->label)
			break;

		of_node_put(np);
	}

	return np;
}
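
/*
 * Resolve the shared buffer: either a static carveout described by the
 * "memory-region" phandle (is_static = true), or, failing that, a
 * dynamically allocated buffer of "shared-buffer-size" bytes backed by
 * the device's reserved-memory pool.
 */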
static int qcom_ddump_map_memory(struct qcom_dmesg_dumper *qdd)
{
	struct device *dev = qdd->dev;
	struct device_node *shm_np;
	struct device_node *np;
	u32 size = 0;
	int ret;

	np = dev->of_node;
	if (!qdd->primary_vm) {
		np = qcom_ddump_svm_of_parse(qdd);
		if (!np) {
			dev_err(dev, "Unable to parse shared mem node\n");
			return -EINVAL;
		}
	}

	shm_np = of_parse_phandle(np, "memory-region", 0);
	if (!shm_np)
		return -EINVAL;

	ret = of_address_to_resource(shm_np, 0, &qdd->res);
	of_node_put(shm_np);
	if (!qdd->primary_vm)
		of_node_put(np);

	if (!ret) {
		qdd->is_static = true;
		qdd->size = resource_size(&qdd->res);
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "%s: dma_set_mask_and_coherent failed\n", __func__);
		return ret;
	}

	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
	if (ret) {
		dev_err(dev, "%s: Failed to initialize CMA mem, ret %d\n",
			__func__, ret);
		return ret;
	}

	ret = of_property_read_u32(qdd->dev->of_node, "shared-buffer-size", &size);
	if (ret) {
		dev_err(dev, "%s: Failed to get shared memory size, ret %d\n",
			__func__, ret);
		return ret;
	}

	qdd->size = size;
	qdd->is_static = false;

	return 0;
}
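
/*
 * Primary-VM side: reassign the buffer to self + peer through the secure
 * monitor, then share it with the peer VM via the Gunyah resource manager.
 * On share failure, ownership is handed back to the local VM (HLOS).
 */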
static int qcom_ddump_share_mem(struct qcom_dmesg_dumper *qdd, gh_vmid_t self,
				gh_vmid_t peer)
{
	struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE},
					       {peer, PERM_READ | PERM_WRITE}};
	struct qcom_scm_vmperm src_vmlist[] = {{self,
					       PERM_READ | PERM_WRITE | PERM_EXEC}};
	u64 dst_vmid = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid);
	u64 src_vmid = BIT(src_vmlist[0].vmid);
	struct gh_acl_desc *acl;
	struct gh_sgl_desc *sgl;
	int ret, assign_mem_ret;

	ret = qcom_scm_assign_mem(qdd->res.start, resource_size(&qdd->res),
				  &src_vmid, dst_vmlist, ARRAY_SIZE(dst_vmlist));
	if (ret) {
		dev_err(qdd->dev, "qcom_scm_assign_mem addr=%pa size=%u failed: %d\n",
			&qdd->res.start, qdd->size, ret);
		return ret;
	}

	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
	if (!acl)
		return -ENOMEM;

	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl) {
		kfree(acl);
		return -ENOMEM;
	}

	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = (u16)self;
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = (u16)peer;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = qdd->res.start;
	sgl->sgl_entries[0].size = resource_size(&qdd->res);

	ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, qdd->label,
			       acl, sgl, NULL, &qdd->memparcel);
	if (ret) {
		dev_err(qdd->dev, "Gunyah mem share addr=%pa size=%u failed: %d\n",
			&qdd->res.start, qdd->size, ret);
		/* Attempt to give resource back to HLOS */
		assign_mem_ret = qcom_scm_assign_mem(qdd->res.start, resource_size(&qdd->res),
						     &dst_vmid, src_vmlist, ARRAY_SIZE(src_vmlist));
		if (assign_mem_ret) {
			dev_err(qdd->dev, "qcom_scm_assign_mem addr=%pa size=%u failed: %d\n",
				&qdd->res.start, qdd->size, assign_mem_ret);
		}
	}

	kfree(acl);
	kfree(sgl);

	return ret;
}
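
/*
 * Reclaim the Gunyah memparcel from the peer and return exclusive
 * ownership of the buffer to the local VM.
 */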
static int qcom_ddump_unshare_mem(struct qcom_dmesg_dumper *qdd, gh_vmid_t self,
				  gh_vmid_t peer)
{
	struct qcom_scm_vmperm dst_vmlist[] = {{self,
					       PERM_READ | PERM_WRITE | PERM_EXEC}};
	u64 src_vmid = BIT(self) | BIT(peer);
	int ret;

	ret = ghd_rm_mem_reclaim(qdd->memparcel, 0);
	if (ret)
		dev_err(qdd->dev, "Gunyah mem reclaim failed: %d\n", ret);

	ret = qcom_scm_assign_mem(qdd->res.start, resource_size(&qdd->res),
				  &src_vmid, dst_vmlist, ARRAY_SIZE(dst_vmlist));
	if (ret)
		dev_err(qdd->dev, "unshare mem assign call failed with %d\n",
			ret);

	return ret;
}
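
/*
 * Resource-manager VM status notifier (primary VM only): when the peer VM
 * becomes READY, allocate the buffer if it is not a static carveout,
 * register it with minidump and share it with the peer; on RESET, unshare
 * it and free a dynamically allocated buffer.
 */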
static int qcom_ddump_rm_cb(struct notifier_block *nb, unsigned long cmd,
			    void *data)
{
	struct gh_rm_notif_vm_status_payload *vm_status_payload;
	struct qcom_dmesg_dumper *qdd;
	dma_addr_t dma_handle;
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;
	int ret;

	qdd = container_of(nb, struct qcom_dmesg_dumper, rm_nb);
	if (cmd != GH_RM_NOTIF_VM_STATUS)
		return NOTIFY_DONE;

	vm_status_payload = data;
	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
	    vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
		return NOTIFY_DONE;

	if (ghd_rm_get_vmid(qdd->peer_name, &peer_vmid))
		return NOTIFY_DONE;

	if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		return NOTIFY_DONE;

	if (peer_vmid != vm_status_payload->vmid)
		return NOTIFY_DONE;

	if (vm_status_payload->vm_status == GH_RM_VM_STATUS_READY) {
		if (!qdd->is_static) {
			qdd->base = dma_alloc_coherent(qdd->dev, qdd->size,
						       &dma_handle, GFP_KERNEL);
			if (!qdd->base)
				return NOTIFY_DONE;

			qdd->res.start = dma_to_phys(qdd->dev, dma_handle);
			qdd->res.end = qdd->res.start + qdd->size - 1;
		}

		strscpy(qdd->md_entry.name, "VM_LOG", sizeof(qdd->md_entry.name));
		qdd->md_entry.virt_addr = (uintptr_t)qdd->base;
		qdd->md_entry.phys_addr = qdd->res.start;
		qdd->md_entry.size = qdd->size;
		ret = msm_minidump_add_region(&qdd->md_entry);
		if (ret < 0)
			dev_err(qdd->dev, "Failed to add vm log entry in minidump table %d\n", ret);

		if (qcom_ddump_share_mem(qdd, self_vmid, peer_vmid)) {
			dev_err(qdd->dev, "Failed to share memory\n");
			goto free_mem;
		}

		vm_status_ready = true;
	}

	if (vm_status_payload->vm_status == GH_RM_VM_STATUS_RESET) {
		vm_status_ready = false;
		if (!qcom_ddump_unshare_mem(qdd, self_vmid, peer_vmid))
			goto free_mem;
	}

	return NOTIFY_DONE;

free_mem:
	if (!qdd->is_static)
		dma_free_coherent(qdd->dev, qdd->size, qdd->base,
				  phys_to_dma(qdd->dev, qdd->res.start));

	return NOTIFY_DONE;
}
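
/* Ring the Gunyah doorbell to notify the peer VM. */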
static inline int qcom_ddump_gh_kick(struct qcom_dmesg_dumper *qdd)
{
	gh_dbl_flags_t dbl_mask = DDUMP_DBL_MASK;
	int ret;

	ret = gh_dbl_send(qdd->tx_dbl, &dbl_mask, 0);
	if (ret)
		dev_err(qdd->dev, "failed to raise virq to the sender %d\n", ret);

	return ret;
}
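
/*
 * Doorbell receive callback. On the primary VM this completes a pending
 * vmkmsg read; on the SVM it holds a wakeup source, dumps the live kernel
 * log into shared memory and kicks the doorbell back.
 */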
static void qcom_ddump_gh_cb(int irq, void *data)
{
	gh_dbl_flags_t dbl_mask = DDUMP_DBL_MASK;
	struct qcom_dmesg_dumper *qdd;
	struct ddump_shm_hdr *hdr;
	int ret;

	qdd = data;
	hdr = qdd->base;
	gh_dbl_read_and_clean(qdd->rx_dbl, &dbl_mask, GH_DBL_NONBLOCK);
	if (qdd->primary_vm) {
		complete(&qdd->ddump_completion);
	} else {
		/* keep the system from entering suspend */
		pm_wakeup_ws_event(qdd->wakeup_source, 2000, true);
		ret = qcom_ddump_alive_log_to_shm(qdd, hdr->user_buf_len);
		if (ret)
			dev_err(qdd->dev, "dump alive log error %d\n", ret);

		qcom_ddump_gh_kick(qdd);
		if (hdr->svm_dump_len == 0)
			pm_wakeup_ws_event(qdd->wakeup_source, 0, true);
	}
}
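
/*
 * /proc/vmkmsg read handler on the primary VM: request a live log dump
 * from the SVM over the doorbell, wait for it to complete and copy the
 * result to userspace.
 */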
static ssize_t qcom_ddump_vmkmsg_read(struct file *file, char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct qcom_dmesg_dumper *qdd = pde_data(file_inode(file));
	struct ddump_shm_hdr *hdr = qdd->base;
	int ret;

	if (!vm_status_ready)
		return -ENODEV;

	if (count < LOG_LINE_MAX) {
		dev_err(qdd->dev, "user buffer size must be at least %d\n", LOG_LINE_MAX);
		return -EINVAL;
	}

	/*
	 * If the SVM is in suspend and the log size is more than 1 KB, assume
	 * the SVM has logs that need to be read. Otherwise, treat it as
	 * suspend-only output and skip it.
	 */
	if (hdr->svm_is_suspend && hdr->svm_dump_len < 1024)
		return 0;

	hdr->user_buf_len = count;
	qcom_ddump_gh_kick(qdd);
	ret = wait_for_completion_timeout(&qdd->ddump_completion, DDUMP_WAIT_WAKEIRQ_TIMEOUT);
	if (!ret) {
		dev_err(qdd->dev, "wait for completion timeout\n");
		return -ETIMEDOUT;
	}

	if (hdr->svm_dump_len > count) {
		dev_err(qdd->dev, "can not read the correct length of svm kmsg\n");
		return -EINVAL;
	}

	if (hdr->svm_dump_len &&
	    copy_to_user(buf, &hdr->data, hdr->svm_dump_len)) {
		dev_err(qdd->dev, "copy_to_user fail\n");
		return -EFAULT;
	}

	return hdr->svm_dump_len;
}

static const struct proc_ops ddump_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_read	= qcom_ddump_vmkmsg_read,
};
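
/*
 * Optional "alive log" setup (CONFIG_QCOM_VM_ALIVE_LOG_DUMPER): map the
 * shared buffer and create /proc/vmkmsg on the primary VM, or initialise
 * the shared header, wakeup source and panic notifier on the SVM, then
 * register the TX/RX doorbells on both sides.
 */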
static int qcom_ddump_alive_log_probe(struct qcom_dmesg_dumper *qdd)
{
	struct device_node *node = qdd->dev->of_node;
	struct device *dev = qdd->dev;
	struct proc_dir_entry *dent;
	struct ddump_shm_hdr *hdr;
	enum gh_dbl_label dbl_label;
	struct resource *res;
	size_t shm_min_size;
	int ret;

	shm_min_size = LOG_LINE_MAX + DDUMP_GET_SHM_HDR;
	if (qdd->size < shm_min_size) {
		dev_err(dev, "Shared memory size must be at least %zu\n", shm_min_size);
		return -EINVAL;
	}

	if (qdd->primary_vm) {
		if (qdd->is_static) {
			res = devm_request_mem_region(dev, qdd->res.start,
						      qdd->size, dev_name(dev));
			if (!res) {
				dev_err(dev, "request mem region fail\n");
				return -ENXIO;
			}

			qdd->base = devm_ioremap_wc(dev, qdd->res.start, qdd->size);
			if (!qdd->base) {
				dev_err(dev, "ioremap fail\n");
				return -ENOMEM;
			}
		}

		init_completion(&qdd->ddump_completion);
		dent = proc_create_data(DDUMP_PROFS_NAME, 0400, NULL, &ddump_proc_ops, qdd);
		if (!dent) {
			dev_err(dev, "proc_create_data fail\n");
			return -ENOMEM;
		}
	} else {
		/* init shared memory header */
		hdr = qdd->base;
		hdr->svm_is_suspend = false;

		ret = qcom_ddump_encrypt_init(node);
		if (ret)
			return ret;

		qdd->wakeup_source = wakeup_source_register(dev, dev_name(dev));
		if (!qdd->wakeup_source)
			return -ENOMEM;

		qdd->gh_panic_nb.notifier_call = qcom_ddump_gh_panic_handler;
		qdd->gh_panic_nb.priority = INT_MAX;
		ret = gh_panic_notifier_register(&qdd->gh_panic_nb);
		if (ret)
			goto err_panic_notifier_register;
	}

	dbl_label = qdd->label;
	qdd->tx_dbl = gh_dbl_tx_register(dbl_label);
	if (IS_ERR_OR_NULL(qdd->tx_dbl)) {
		ret = PTR_ERR(qdd->tx_dbl);
		dev_err(dev, "%s: Failed to get gunyah tx dbl %d\n", __func__, ret);
		goto err_dbl_tx_register;
	}

	qdd->rx_dbl = gh_dbl_rx_register(dbl_label, qcom_ddump_gh_cb, qdd);
	if (IS_ERR_OR_NULL(qdd->rx_dbl)) {
		ret = PTR_ERR(qdd->rx_dbl);
		dev_err(dev, "%s: Failed to get gunyah rx dbl %d\n", __func__, ret);
		goto err_dbl_rx_register;
	}

	return 0;

err_dbl_rx_register:
	gh_dbl_tx_unregister(qdd->tx_dbl);
err_dbl_tx_register:
	if (qdd->primary_vm)
		remove_proc_entry(DDUMP_PROFS_NAME, NULL);
	else
		gh_panic_notifier_unregister(&qdd->gh_panic_nb);
err_panic_notifier_register:
	if (!qdd->primary_vm)
		wakeup_source_unregister(qdd->wakeup_source);

	return ret;
}
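
/*
 * Probe: parse the Gunyah label and VM role, map the shared memory, then
 * either register for VM status notifications (primary VM) or map the
 * region and register a kmsg dumper (SVM).
 */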
static int qcom_ddump_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct qcom_dmesg_dumper *qdd;
	struct device *dev;
	int ret;
	struct resource *res;

	qdd = devm_kzalloc(&pdev->dev, sizeof(*qdd), GFP_KERNEL);
	if (!qdd)
		return -ENOMEM;

	qdd->dev = &pdev->dev;
	platform_set_drvdata(pdev, qdd);
	dev = qdd->dev;

	ret = of_property_read_u32(node, "gunyah-label", &qdd->label);
	if (ret) {
		dev_err(dev, "Failed to read label %d\n", ret);
		return ret;
	}

	qdd->primary_vm = of_property_read_bool(node, "qcom,primary-vm");

	ret = qcom_ddump_map_memory(qdd);
	if (ret)
		return ret;

	if (qdd->primary_vm) {
		ret = of_property_read_u32(node, "peer-name", &qdd->peer_name);
		if (ret)
			qdd->peer_name = GH_SELF_VM;

		qdd->rm_nb.notifier_call = qcom_ddump_rm_cb;
		qdd->rm_nb.priority = INT_MAX;
		gh_rm_register_notifier(&qdd->rm_nb);
	} else {
		res = devm_request_mem_region(dev, qdd->res.start, qdd->size, dev_name(dev));
		if (!res) {
			dev_err(dev, "request mem region fail\n");
			return -ENXIO;
		}

		qdd->base = devm_ioremap_wc(dev, qdd->res.start, qdd->size);
		if (!qdd->base) {
			dev_err(dev, "ioremap fail\n");
			return -ENOMEM;
		}

		kmsg_dump_rewind(&qdd->iter);
		qdd->dump.dump = qcom_ddump_to_shm;
		ret = kmsg_dump_register(&qdd->dump);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_QCOM_VM_ALIVE_LOG_DUMPER)) {
		ret = qcom_ddump_alive_log_probe(qdd);
		if (ret) {
			if (qdd->primary_vm)
				gh_rm_unregister_notifier(&qdd->rm_nb);
			else
				kmsg_dump_unregister(&qdd->dump);

			return ret;
		}
	}

	return 0;
}
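
/*
 * Undo probe: unregister doorbells and notifiers, remove /proc/vmkmsg,
 * unshare the buffer and free it if it was dynamically allocated.
 */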
static int qcom_ddump_remove(struct platform_device *pdev)
{
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;
	int ret;
	struct qcom_dmesg_dumper *qdd = platform_get_drvdata(pdev);

	if (IS_ENABLED(CONFIG_QCOM_VM_ALIVE_LOG_DUMPER)) {
		gh_dbl_tx_unregister(qdd->tx_dbl);
		gh_dbl_rx_unregister(qdd->rx_dbl);
		if (qdd->primary_vm) {
			remove_proc_entry(DDUMP_PROFS_NAME, NULL);
		} else {
			gh_panic_notifier_unregister(&qdd->gh_panic_nb);
			wakeup_source_unregister(qdd->wakeup_source);
			qcom_ddump_encrypt_exit();
		}
	}

	if (qdd->primary_vm) {
		gh_rm_unregister_notifier(&qdd->rm_nb);
		ret = ghd_rm_get_vmid(qdd->peer_name, &peer_vmid);
		if (ret)
			return ret;

		ret = ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid);
		if (ret)
			return ret;

		ret = qcom_ddump_unshare_mem(qdd, self_vmid, peer_vmid);
		if (ret)
			return ret;

		if (!qdd->is_static)
			dma_free_coherent(qdd->dev, qdd->size, qdd->base,
					  phys_to_dma(qdd->dev, qdd->res.start));
	} else {
		ret = kmsg_dump_unregister(&qdd->dump);
		if (ret)
			return ret;
	}

	return 0;
}
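
/*
 * SVM suspend/resume hooks: flag the suspend state in the shared header
 * and snapshot the live log before suspend, restoring the kmsg iterator
 * position so the suspend-time dump does not advance it.
 */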
#if IS_ENABLED(CONFIG_PM_SLEEP) && IS_ENABLED(CONFIG_ARCH_QTI_VM) && \
	IS_ENABLED(CONFIG_QCOM_VM_ALIVE_LOG_DUMPER)
static int qcom_ddump_suspend(struct device *pdev)
{
	struct qcom_dmesg_dumper *qdd = dev_get_drvdata(pdev);
	struct ddump_shm_hdr *hdr = qdd->base;
	u64 seq_backup;
	int ret;

	hdr->svm_is_suspend = true;
	seq_backup = qdd->iter.cur_seq;
	ret = qcom_ddump_alive_log_to_shm(qdd, qdd->size);
	if (ret)
		dev_err(qdd->dev, "dump alive log error %d\n", ret);

	qdd->iter.cur_seq = seq_backup;

	return 0;
}

static int qcom_ddump_resume(struct device *pdev)
{
	struct qcom_dmesg_dumper *qdd = dev_get_drvdata(pdev);
	struct ddump_shm_hdr *hdr = qdd->base;

	hdr->svm_is_suspend = false;

	return 0;
}

static SIMPLE_DEV_PM_OPS(ddump_pm_ops, qcom_ddump_suspend, qcom_ddump_resume);
#endif

static const struct of_device_id ddump_match_table[] = {
	{ .compatible = "qcom,dmesg-dump" },
	{}
};

static struct platform_driver ddump_driver = {
	.driver = {
		.name = "qcom_dmesg_dumper",
#if IS_ENABLED(CONFIG_PM_SLEEP) && IS_ENABLED(CONFIG_ARCH_QTI_VM) && \
	IS_ENABLED(CONFIG_QCOM_VM_ALIVE_LOG_DUMPER)
		.pm = &ddump_pm_ops,
#endif
		.of_match_table = ddump_match_table,
	},
	.probe = qcom_ddump_probe,
	.remove = qcom_ddump_remove,
};

static int __init qcom_ddump_init(void)
{
	return platform_driver_register(&ddump_driver);
}

#if IS_ENABLED(CONFIG_ARCH_QTI_VM)
arch_initcall(qcom_ddump_init);
#else
module_init(qcom_ddump_init);
#endif

static __exit void qcom_ddump_exit(void)
{
	platform_driver_unregister(&ddump_driver);
}
module_exit(qcom_ddump_exit);

MODULE_DESCRIPTION("QTI Virtual Machine dmesg log buffer dumper");
MODULE_LICENSE("GPL");