// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/module.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>

/* CPUCP Register offsets */
#define CPUCP_IPC_CHAN_SUPPORTED	2
#define CPUCP_SEND_IRQ_OFFSET		0xC
#define CPUCP_SEND_IRQ_VAL		BIT(28)
#define CPUCP_CLEAR_IRQ_OFFSET		0x308
#define CPUCP_STATUS_IRQ_OFFSET		0x30C
#define CPUCP_CLEAR_IRQ_VAL		BIT(3)
#define CPUCP_STATUS_IRQ_VAL		BIT(3)
#define CPUCP_CLOCK_DOMAIN_OFFSET	0x1000

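/*
 * Each IPC channel has its own bank of RX irq status/clear registers; the
 * banks are spaced CPUCP_CLOCK_DOMAIN_OFFSET apart, so channel i's registers
 * sit at rx_irq_base + <offset> + i * CPUCP_CLOCK_DOMAIN_OFFSET.
 */
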
/**
 * struct qcom_cpucp_ipc - per-controller CPUCP IPC state
 * @mbox: mailbox-controller interface
 * @chans: the mailbox clients' channel array
 * @tx_irq_base: iomapped register region used to raise the outgoing irq
 * @rx_irq_base: iomapped register region with incoming irq status/clear
 * @dev: device associated with this instance
 * @irq: CPUCP-to-HLOS irq
 * @num_chan: number of IPC channels supported
 */
struct qcom_cpucp_ipc {
	struct mbox_controller mbox;
	struct mbox_chan chans[CPUCP_IPC_CHAN_SUPPORTED];
	void __iomem *tx_irq_base;
	void __iomem *rx_irq_base;
	struct device *dev;
	int irq;
	int num_chan;
};

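/*
 * CPUCP-to-HLOS interrupt handler: scan each channel's status register,
 * write the clear register to acknowledge a pending interrupt, then notify
 * the mailbox client bound to that channel.
 */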
static irqreturn_t qcom_cpucp_rx_interrupt(int irq, void *p)
{
	struct qcom_cpucp_ipc *cpucp_ipc = p;
	unsigned long flags;
	u32 val;
	int i;

	for (i = 0; i < cpucp_ipc->num_chan; i++) {
		val = readl(cpucp_ipc->rx_irq_base + CPUCP_STATUS_IRQ_OFFSET +
			    (i * CPUCP_CLOCK_DOMAIN_OFFSET));
		if (val & CPUCP_STATUS_IRQ_VAL) {
			writel(CPUCP_CLEAR_IRQ_VAL, cpucp_ipc->rx_irq_base +
			       CPUCP_CLEAR_IRQ_OFFSET +
			       (i * CPUCP_CLOCK_DOMAIN_OFFSET));
			/* Make sure reg write is complete before proceeding */
			mb();
			spin_lock_irqsave(&cpucp_ipc->chans[i].lock, flags);
			if (cpucp_ipc->chans[i].con_priv)
				mbox_chan_received_data(&cpucp_ipc->chans[i], NULL);
			spin_unlock_irqrestore(&cpucp_ipc->chans[i].lock, flags);
		}
	}

	return IRQ_HANDLED;
}

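/* Release the channel: clear con_priv so the channel can be reacquired */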
static void qcom_cpucp_mbox_shutdown(struct mbox_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->con_priv = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);
}

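/*
 * Ring the doorbell towards CPUCP. TX completion is not tracked (both
 * txdone_irq and txdone_poll are false), so this is fire-and-forget.
 */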
static int qcom_cpucp_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct qcom_cpucp_ipc *cpucp_ipc =
		container_of(chan->mbox, struct qcom_cpucp_ipc, mbox);

	writel(CPUCP_SEND_IRQ_VAL, cpucp_ipc->tx_irq_base + CPUCP_SEND_IRQ_OFFSET);

	return 0;
}

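/*
 * One-cell channel specifier: args[0] selects the channel index. A client
 * would bind to a channel with a devicetree fragment along these lines
 * (node names, labels, addresses and interrupt numbers below are purely
 * illustrative, not defined by this driver):
 *
 *	cpucp_mbox: mailbox@18590000 {
 *		compatible = "qcom,cpucp";
 *		reg = <0x18590000 0x400>,	// tx doorbell region
 *		      <0x18591000 0x400>;	// rx status/clear region
 *		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 *		#mbox-cells = <1>;
 *	};
 *
 *	client {
 *		mboxes = <&cpucp_mbox 0>;
 *	};
 */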
static struct mbox_chan *qcom_cpucp_mbox_xlate(struct mbox_controller *mbox,
					       const struct of_phandle_args *sp)
{
	struct qcom_cpucp_ipc *cpucp_ipc =
		container_of(mbox, struct qcom_cpucp_ipc, mbox);
	unsigned long ind;

	if (sp->args_count != 1)
		return ERR_PTR(-EINVAL);

	ind = sp->args[0];
	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	if (mbox->chans[ind].con_priv)
		return ERR_PTR(-EBUSY);

	mbox->chans[ind].con_priv = cpucp_ipc;

	return &mbox->chans[ind];
}

static const struct mbox_chan_ops cpucp_mbox_chan_ops = {
	.send_data = qcom_cpucp_mbox_send_data,
	.shutdown = qcom_cpucp_mbox_shutdown,
};

static int qcom_cpucp_ipc_setup_mbox(struct qcom_cpucp_ipc *cpucp_ipc)
{
	struct mbox_controller *mbox;
	struct device *dev = cpucp_ipc->dev;
	unsigned long i;

	/* Initialize channel identifiers */
	for (i = 0; i < ARRAY_SIZE(cpucp_ipc->chans); i++)
		cpucp_ipc->chans[i].con_priv = NULL;

	mbox = &cpucp_ipc->mbox;
	mbox->dev = dev;
	mbox->num_chans = cpucp_ipc->num_chan;
	mbox->chans = cpucp_ipc->chans;
	mbox->ops = &cpucp_mbox_chan_ops;
	mbox->of_xlate = qcom_cpucp_mbox_xlate;
	mbox->txdone_irq = false;
	mbox->txdone_poll = false;

	return mbox_controller_register(mbox);
}

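/*
 * Probe: reg index 0 maps the TX doorbell region, reg index 1 maps the RX
 * status/clear region, and the first interrupt is the CPUCP-to-HLOS line.
 */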
static int qcom_cpucp_probe(struct platform_device *pdev)
{
	struct qcom_cpucp_ipc *cpucp_ipc;
	struct resource *res;
	int ret;

	cpucp_ipc = devm_kzalloc(&pdev->dev, sizeof(*cpucp_ipc), GFP_KERNEL);
	if (!cpucp_ipc)
		return -ENOMEM;

	cpucp_ipc->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get the device base address\n");
		return -ENODEV;
	}

	cpucp_ipc->tx_irq_base = devm_ioremap(&pdev->dev, res->start,
					      resource_size(res));
	if (!cpucp_ipc->tx_irq_base) {
		dev_err(&pdev->dev, "Failed to ioremap cpucp tx irq addr\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get the device base address\n");
		return -ENODEV;
	}

	cpucp_ipc->rx_irq_base = devm_ioremap(&pdev->dev, res->start,
					      resource_size(res));
	if (!cpucp_ipc->rx_irq_base) {
		dev_err(&pdev->dev, "Failed to ioremap cpucp rx irq addr\n");
		return -ENOMEM;
	}

	cpucp_ipc->irq = platform_get_irq(pdev, 0);
	if (cpucp_ipc->irq < 0) {
		dev_err(&pdev->dev, "Failed to get the IRQ\n");
		return cpucp_ipc->irq;
	}

	cpucp_ipc->num_chan = CPUCP_IPC_CHAN_SUPPORTED;

	ret = qcom_cpucp_ipc_setup_mbox(cpucp_ipc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to create mailbox\n");
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, cpucp_ipc->irq,
			       qcom_cpucp_rx_interrupt,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       "qcom_cpucp", cpucp_ipc);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
		goto err_mbox;
	}

	platform_set_drvdata(pdev, cpucp_ipc);

	return 0;

err_mbox:
	mbox_controller_unregister(&cpucp_ipc->mbox);

	return ret;
}

static int qcom_cpucp_remove(struct platform_device *pdev)
{
	struct qcom_cpucp_ipc *cpucp_ipc = platform_get_drvdata(pdev);

	mbox_controller_unregister(&cpucp_ipc->mbox);

	return 0;
}

static const struct of_device_id qcom_cpucp_of_match[] = {
	{ .compatible = "qcom,cpucp" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpucp_of_match);

static struct platform_driver qcom_cpucp_driver = {
	.probe = qcom_cpucp_probe,
	.remove = qcom_cpucp_remove,
	.driver = {
		.name = "qcom_cpucp",
		.of_match_table = qcom_cpucp_of_match,
		.suppress_bind_attrs = true,
	},
};
module_platform_driver(qcom_cpucp_driver);

MODULE_DESCRIPTION("QTI CPUCP Driver");
MODULE_LICENSE("GPL");