gh_panic_notifier.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/qcom_scm.h>
#include <linux/gunyah/gh_dbl.h>
#include <linux/gunyah/gh_panic_notifier.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_vm.h>
#include <soc/qcom/secure_buffer.h>

struct gh_panic_notifier_dev {
	struct device *dev;
	struct resource res;
	void *base;
	u64 size;
	u32 label, peer_name, memparcel;
	bool primary_vm;
	void *tx_dbl;
	void *rx_dbl;
	struct wakeup_source *ws;
	struct notifier_block vm_nb;
};

SRCU_NOTIFIER_HEAD_STATIC(gh_panic_notifier);

static bool gh_panic_notifier_initialized;
static struct gh_panic_notifier_dev *gpnd;

#define GH_PANIC_DBL_MASK	0x1
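
/*
 * Clients register for panic notifications from the peer VM through this
 * SRCU notifier chain; registration is deferred until the driver has probed.
 */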
int gh_panic_notifier_register(struct notifier_block *nb)
{
	if (!gh_panic_notifier_initialized)
		return -EPROBE_DEFER;

	return srcu_notifier_chain_register(&gh_panic_notifier, nb);
}
EXPORT_SYMBOL(gh_panic_notifier_register);

int gh_panic_notifier_unregister(struct notifier_block *nb)
{
	if (!gh_panic_notifier_initialized)
		return -EPROBE_DEFER;

	return srcu_notifier_chain_unregister(&gh_panic_notifier, nb);
}
EXPORT_SYMBOL(gh_panic_notifier_unregister);
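
/* Ring the TX doorbell so the peer VM is notified of the local panic. */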
static inline int gh_panic_notifier_kick(void)
{
	gh_dbl_flags_t dbl_mask = GH_PANIC_DBL_MASK;
	int ret;

	ret = gh_dbl_send(gpnd->tx_dbl, &dbl_mask, GH_DBL_NONBLOCK);
	if (ret)
		printk_deferred("failed to raise virq to the peer %d\n", ret);

	return ret;
}
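
/*
 * RX doorbell callback on the receiving VM: acknowledge the doorbell, keep
 * the system awake, run the local notifier chain, then report completion
 * back through the first byte of the shared buffer.
 */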
static void gh_panic_notify_receiver(int irq, void *data)
{
	gh_dbl_flags_t dbl_mask = GH_PANIC_DBL_MASK;
	bool *handle_done;

	handle_done = gpnd->base;
	gh_dbl_read_and_clean(gpnd->rx_dbl, &dbl_mask, GH_DBL_NONBLOCK);

	/* Prevent the system from entering suspend while handling the panic */
	__pm_stay_awake(gpnd->ws);
	srcu_notifier_call_chain(&gh_panic_notifier, 0, NULL);
	*handle_done = true;
}
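
/*
 * Share the notification buffer with the peer VM: reassign it to both VMs
 * via the SCM call, then hand it to the Gunyah resource manager as a shared
 * memparcel. On failure, try to return the memory to the primary VM.
 */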
static int gh_panic_notifier_share_mem(gh_vmid_t self, gh_vmid_t peer)
{
	struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE},
					       {peer, PERM_READ | PERM_WRITE}};
	struct qcom_scm_vmperm src_vmlist[] = {{self,
					       PERM_READ | PERM_WRITE | PERM_EXEC}};
	u64 dst_vmid = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid);
	u64 src_vmid = BIT(src_vmlist[0].vmid);
	struct gh_acl_desc *acl;
	struct gh_sgl_desc *sgl;
	int ret, assign_mem_ret;

	ret = qcom_scm_assign_mem(gpnd->res.start, gpnd->size, &src_vmid,
				  dst_vmlist, ARRAY_SIZE(dst_vmlist));
	if (ret) {
		dev_err(gpnd->dev, "qcom_scm_assign_mem addr=%pa size=%llu failed: %d\n",
			&gpnd->res.start, gpnd->size, ret);
		return ret;
	}

	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl) {
		kfree(acl);
		return -ENOMEM;
	}

	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = (u16)self;
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = (u16)peer;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = gpnd->res.start;
	sgl->sgl_entries[0].size = resource_size(&gpnd->res);

	ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, gpnd->label,
			       acl, sgl, NULL, &gpnd->memparcel);
	if (ret) {
		dev_err(gpnd->dev, "Gunyah mem share addr=%pa size=%llu failed: %d\n",
			&gpnd->res.start, gpnd->size, ret);
		/* Attempt to give the resource back to HLOS */
		assign_mem_ret = qcom_scm_assign_mem(gpnd->res.start, gpnd->size, &dst_vmid,
						     src_vmlist, ARRAY_SIZE(src_vmlist));
		if (assign_mem_ret) {
			dev_err(gpnd->dev, "qcom_scm_assign_mem addr=%pa size=%llu failed: %d\n",
				&gpnd->res.start, gpnd->size, assign_mem_ret);
		}
	}

	kfree(acl);
	kfree(sgl);
	return ret;
}
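
/*
 * Reclaim the shared memparcel from the Gunyah resource manager, reassign
 * the memory back to the primary VM, and free the DMA buffer.
 */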
static void gh_panic_notifier_unshare_mem(gh_vmid_t self, gh_vmid_t peer)
{
	struct qcom_scm_vmperm dst_vmlist[] = {{self,
					       PERM_READ | PERM_WRITE | PERM_EXEC}};
	u64 src_vmid = BIT(self) | BIT(peer);
	int ret;

	ret = ghd_rm_mem_reclaim(gpnd->memparcel, 0);
	if (ret)
		dev_err(gpnd->dev, "Gunyah mem reclaim failed: %d\n", ret);

	ret = qcom_scm_assign_mem(gpnd->res.start, resource_size(&gpnd->res),
				  &src_vmid, dst_vmlist, ARRAY_SIZE(dst_vmlist));
	if (ret) {
		dev_err(gpnd->dev, "unshare mem assign call failed with %d\n",
			ret);
	} else {
		dma_free_coherent(gpnd->dev, gpnd->size, gpnd->base,
				  phys_to_dma(gpnd->dev, gpnd->res.start));
	}
}
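
/*
 * VM state notifier for the primary VM: allocate and share the notification
 * buffer before the peer VM powers up, and reclaim it on early power-off.
 */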
static int gh_panic_notifier_vm_cb(struct notifier_block *nb, unsigned long cmd,
				   void *data)
{
	dma_addr_t dma_handle;
	gh_vmid_t *notify_vmid;
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;
	bool *handle_done;

	if (cmd != GH_VM_BEFORE_POWERUP && cmd != GH_VM_EARLY_POWEROFF)
		return NOTIFY_DONE;

	notify_vmid = data;
	if (ghd_rm_get_vmid(gpnd->peer_name, &peer_vmid))
		return NOTIFY_DONE;
	if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		return NOTIFY_DONE;
	if (peer_vmid != *notify_vmid)
		return NOTIFY_DONE;

	if (cmd == GH_VM_BEFORE_POWERUP) {
		gpnd->base = dma_alloc_coherent(gpnd->dev, gpnd->size, &dma_handle, GFP_KERNEL);
		if (!gpnd->base)
			return NOTIFY_DONE;
		gpnd->res.start = dma_to_phys(gpnd->dev, dma_handle);
		gpnd->res.end = gpnd->res.start + gpnd->size - 1;
		handle_done = gpnd->base;
		*handle_done = false;
		if (gh_panic_notifier_share_mem(self_vmid, peer_vmid)) {
			dev_err(gpnd->dev, "Failed to share memory\n");
			return NOTIFY_DONE;
		}
	}

	if (cmd == GH_VM_EARLY_POWEROFF)
		gh_panic_notifier_unshare_mem(self_vmid, peer_vmid);

	return NOTIFY_DONE;
}
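
/*
 * Walk the irq_data hierarchy until a chip that implements
 * irq_set_irqchip_state() is found, then use it to update the state.
 */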
static int set_irqchip_state(struct irq_desc *desc, unsigned int irq,
			     enum irqchip_irq_state which, bool val)
{
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -EINVAL;

	if (!desc)
		return ret;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_irqchip_state(data, which, val);

	return ret;
}
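
/*
 * Counterpart of set_irqchip_state(): walk the irq_data hierarchy to a chip
 * that implements irq_get_irqchip_state() and read the requested state.
 */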
static int get_irqchip_state(struct irq_desc *desc,
			     enum irqchip_irq_state which, bool *state)
{
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -EINVAL;

	if (!desc)
		return ret;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_get_irqchip_state(data, which, state);

	return ret;
}
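
/*
 * Clear the pending status of every pending interrupt and mask/disable it,
 * so that the panic path, which runs with local interrupts disabled, can
 * still make progress when polling the peer vCPU.
 */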
static void clear_pending_irq(void)
{
	bool state;
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;
		int ret;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		ret = get_irqchip_state(desc, IRQCHIP_STATE_PENDING, &state);
		if (!ret && state) {
			/* Clear the interrupt's pending status */
			ret = set_irqchip_state(desc, i, IRQCHIP_STATE_PENDING, false);
			if (ret && irqd_irq_inprogress(&desc->irq_data) &&
			    chip->irq_eoi)
				chip->irq_eoi(&desc->irq_data);
			if (chip->irq_mask)
				chip->irq_mask(&desc->irq_data);
			if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
				chip->irq_disable(&desc->irq_data);
		}
	}
}
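
/*
 * Panic notifier on the primary VM: kick the peer VM's doorbell and poll its
 * vCPU until the peer reports completion through the shared buffer or the
 * retry budget is exhausted.
 */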
static int gh_panic_notifier_notify(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	unsigned int retry_times = 20;
	gh_vmid_t peer_vmid;
	bool *handle_done;
	int ret;

	handle_done = gpnd->base;
	if (!handle_done)
		return NOTIFY_DONE;

	ret = ghd_rm_get_vmid(gpnd->peer_name, &peer_vmid);
	if (ret)
		return NOTIFY_DONE;

	gh_panic_notifier_kick();

	/*
	 * When the PVM panics, only one CPU keeps running and its local
	 * interrupts are disabled, so pending interrupts can never be
	 * serviced. In that state gh_hcall_vcpu_run() returns immediately and
	 * the peer vCPU cannot be scheduled. Clear and mask the pending
	 * interrupts before polling the vCPU.
	 */
	do {
		clear_pending_irq();
		ret = gh_poll_vcpu_run(peer_vmid);
		if (ret) {
			printk_deferred("Failed to poll vcpu run %d\n", ret);
			break;
		}
		retry_times--;
	} while (!(*handle_done) && retry_times > 0);

	if (!(*handle_done))
		printk_deferred("Failed to notify the VM of the panic\n");

	return NOTIFY_DONE;
}

static struct notifier_block gh_panic_blk = {
	.notifier_call = gh_panic_notifier_notify,
	.priority = 0,
};
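
/*
 * On the secondary VM, find the shared memory region that matches this
 * device's label in the device tree and map it.
 */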
static int gh_panic_notifier_svm_mem_map(void)
{
	const char *compat = "qcom,gunyah-panic-gen";
	struct device_node *np = NULL;
	struct device_node *shm_np;
	struct resource *res;
	u32 label;
	int ret;

	while ((np = of_find_compatible_node(np, NULL, compat))) {
		ret = of_property_read_u32(np, "qcom,label", &label);
		if (ret) {
			of_node_put(np);
			continue;
		}
		if (label == gpnd->label)
			break;
		of_node_put(np);
	}
	if (!np) {
		dev_err(gpnd->dev, "can't find memory with label=%u!\n", gpnd->label);
		return -ENODEV;
	}

	shm_np = of_parse_phandle(np, "memory-region", 0);
	of_node_put(np);
	if (!shm_np) {
		dev_err(gpnd->dev, "no memory-region phandle!\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(shm_np, 0, &gpnd->res);
	of_node_put(shm_np);
	if (ret) {
		dev_err(gpnd->dev, "of_address_to_resource failed!\n");
		return -EINVAL;
	}

	gpnd->size = resource_size(&gpnd->res);
	res = devm_request_mem_region(gpnd->dev, gpnd->res.start, gpnd->size,
				      dev_name(gpnd->dev));
	if (!res) {
		dev_err(gpnd->dev, "request mem region failed\n");
		return -ENXIO;
	}

	gpnd->base = devm_ioremap_wc(gpnd->dev, gpnd->res.start, gpnd->size);
	if (!gpnd->base) {
		dev_err(gpnd->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	return 0;
}
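
/*
 * On the primary VM, set up DMA for the device, attach the reserved memory
 * region, and read the shared buffer size from the device tree.
 */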
static int gh_panic_notifier_pvm_mem_probe(void)
{
	struct device *dev = gpnd->dev;
	u32 size;
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "%s: dma_set_mask_and_coherent failed\n", __func__);
		return ret;
	}

	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
	if (ret) {
		dev_err(dev, "%s: Failed to initialize CMA mem, ret %d\n", __func__, ret);
		return ret;
	}

	ret = of_property_read_u32(gpnd->dev->of_node, "shared-buffer-size", &size);
	if (ret) {
		dev_err(dev, "%s: Failed to get shared memory size, ret %d\n",
			__func__, ret);
		return ret;
	}
	gpnd->size = size;

	return 0;
}
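
/*
 * Probe: on the primary VM, set up memory and the TX doorbell and hook the
 * VM state and panic notifiers; on the secondary VM, map the shared region
 * and register the RX doorbell callback and a wakeup source.
 */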
static int gh_panic_notifier_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	enum gh_dbl_label dbl_label;
	struct device *dev;
	int ret;

	gpnd = devm_kzalloc(&pdev->dev, sizeof(*gpnd), GFP_KERNEL);
	if (!gpnd)
		return -ENOMEM;

	gpnd->dev = &pdev->dev;
	platform_set_drvdata(pdev, gpnd);
	dev = gpnd->dev;

	ret = of_property_read_u32(node, "gunyah-label", &gpnd->label);
	if (ret) {
		dev_err(dev, "Failed to read label %d\n", ret);
		return ret;
	}
	dbl_label = gpnd->label;

	gpnd->primary_vm = of_property_read_bool(node, "qcom,primary-vm");
	if (gpnd->primary_vm) {
		ret = gh_panic_notifier_pvm_mem_probe();
		if (ret)
			return ret;

		ret = of_property_read_u32(node, "peer-name", &gpnd->peer_name);
		if (ret)
			gpnd->peer_name = GH_SELF_VM;

		gpnd->tx_dbl = gh_dbl_tx_register(dbl_label);
		if (IS_ERR_OR_NULL(gpnd->tx_dbl)) {
			ret = gpnd->tx_dbl ? PTR_ERR(gpnd->tx_dbl) : -ENODEV;
			dev_err(dev, "%s: Failed to get gunyah tx dbl %d\n", __func__, ret);
			return ret;
		}

		gpnd->vm_nb.notifier_call = gh_panic_notifier_vm_cb;
		gpnd->vm_nb.priority = INT_MAX;
		gh_register_vm_notifier(&gpnd->vm_nb);
		atomic_notifier_chain_register(&panic_notifier_list, &gh_panic_blk);
	} else {
		ret = gh_panic_notifier_svm_mem_map();
		if (ret)
			return ret;

		gpnd->rx_dbl = gh_dbl_rx_register(dbl_label, gh_panic_notify_receiver, NULL);
		if (IS_ERR_OR_NULL(gpnd->rx_dbl)) {
			ret = gpnd->rx_dbl ? PTR_ERR(gpnd->rx_dbl) : -ENODEV;
			dev_err(dev, "%s: Failed to get gunyah rx dbl %d\n", __func__, ret);
			return ret;
		}

		gpnd->ws = wakeup_source_register(dev, dev_name(dev));
		if (!gpnd->ws) {
			dev_err(dev, "%s: Failed to register wakeup source\n", __func__);
			gh_dbl_rx_unregister(gpnd->rx_dbl);
			return -ENOMEM;
		}
	}

	gh_panic_notifier_initialized = true;
	return 0;
}

static int gh_panic_notifier_remove(struct platform_device *pdev)
{
	if (gpnd->primary_vm) {
		gh_dbl_tx_unregister(gpnd->tx_dbl);
		gh_unregister_vm_notifier(&gpnd->vm_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list, &gh_panic_blk);
	} else {
		gh_dbl_rx_unregister(gpnd->rx_dbl);
		wakeup_source_unregister(gpnd->ws);
	}

	gh_panic_notifier_initialized = false;
	return 0;
}

static const struct of_device_id gh_panic_notifier_match_table[] = {
	{ .compatible = "qcom,gh-panic-notifier" },
	{}
};

static struct platform_driver gh_panic_notifier_driver = {
	.driver = {
		.name = "gh_panic_notifier",
		.of_match_table = gh_panic_notifier_match_table,
	},
	.probe = gh_panic_notifier_probe,
	.remove = gh_panic_notifier_remove,
};

static int __init gh_panic_notifier_init(void)
{
	return platform_driver_register(&gh_panic_notifier_driver);
}

#if IS_ENABLED(CONFIG_ARCH_QTI_VM)
arch_initcall(gh_panic_notifier_init);
#else
module_init(gh_panic_notifier_init);
#endif

static void __exit gh_panic_notifier_exit(void)
{
	platform_driver_unregister(&gh_panic_notifier_driver);
}
module_exit(gh_panic_notifier_exit);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah Panic Notifier Driver");
MODULE_LICENSE("GPL");