  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #include <linux/mailbox_controller.h>
  6. #include <linux/module.h>
  7. #include <linux/interrupt.h>
  8. #include <linux/gunyah.h>
  9. #include <linux/printk.h>
  10. #include <linux/init.h>
  11. #include <linux/slab.h>
  12. #include <linux/wait.h>
  13. #define mbox_chan_to_msgq(chan) (container_of(chan->mbox, struct gh_msgq, mbox))
  14. static irqreturn_t gh_msgq_rx_irq_handler(int irq, void *data)
  15. {
  16. struct gh_msgq *msgq = data;
  17. struct gh_msgq_rx_data rx_data;
  18. enum gh_error gh_error;
  19. bool ready = true;
  20. while (ready) {
  21. gh_error = gh_hypercall_msgq_recv(msgq->rx_ghrsc->capid,
  22. &rx_data.data, sizeof(rx_data.data),
  23. &rx_data.length, &ready);
  24. if (gh_error != GH_ERROR_OK) {
  25. if (gh_error != GH_ERROR_MSGQUEUE_EMPTY)
  26. dev_warn(msgq->mbox.dev, "Failed to receive data: %d\n", gh_error);
  27. break;
  28. }
  29. if (likely(gh_msgq_chan(msgq)->cl))
  30. mbox_chan_received_data(gh_msgq_chan(msgq), &rx_data);
  31. }
  32. return IRQ_HANDLED;
  33. }
  34. /* Fired when message queue transitions from "full" to "space available" to send messages */
  35. static irqreturn_t gh_msgq_tx_irq_handler(int irq, void *data)
  36. {
  37. struct gh_msgq *msgq = data;
  38. mbox_chan_txdone(gh_msgq_chan(msgq), 0);
  39. return IRQ_HANDLED;
  40. }
  41. /* Fired after sending message and hypercall told us there was more space available. */
  42. static void gh_msgq_txdone_tasklet(struct tasklet_struct *tasklet)
  43. {
  44. struct gh_msgq *msgq = container_of(tasklet, struct gh_msgq, txdone_tasklet);
  45. mbox_chan_txdone(gh_msgq_chan(msgq), msgq->last_ret);
  46. }
/*
 * gh_msgq_send_data() - mailbox .send_data callback
 * @chan: mailbox channel backed by this message queue
 * @data: pointer to a struct gh_msgq_tx_data describing the payload
 *
 * Returns 0 when the message was handed to the hypervisor (the actual
 * result is delivered asynchronously via txdone), -EOPNOTSUPP when this
 * msgq has no TX side, or -EAGAIN when the queue is unexpectedly full.
 */
static int gh_msgq_send_data(struct mbox_chan *chan, void *data)
{
	struct gh_msgq *msgq = mbox_chan_to_msgq(chan);
	struct gh_msgq_tx_data *msgq_data = data;
	u64 tx_flags = 0;
	enum gh_error gh_error;
	bool ready;

	/* RX-only message queue: sending is not supported. */
	if (!msgq->tx_ghrsc)
		return -EOPNOTSUPP;

	if (msgq_data->push)
		tx_flags |= GH_HYPERCALL_MSGQ_TX_FLAGS_PUSH;

	gh_error = gh_hypercall_msgq_send(msgq->tx_ghrsc->capid, msgq_data->length, msgq_data->data,
					  tx_flags, &ready);

	/*
	 * unlikely because Linux tracks state of msgq and should not try to
	 * send message when msgq is full.
	 */
	if (unlikely(gh_error == GH_ERROR_MSGQUEUE_FULL))
		return -EAGAIN;

	/*
	 * Propagate all other errors to client. If we return error to mailbox
	 * framework, then no other messages can be sent and nobody will know
	 * to retry this message.
	 */
	msgq->last_ret = gh_error_remap(gh_error);

	/*
	 * This message was successfully sent, but message queue isn't ready to
	 * accept more messages because it's now full. Mailbox framework
	 * requires that we only report that message was transmitted when
	 * we're ready to transmit another message. We'll get that in the form
	 * of tx IRQ once the other side starts to drain the msgq.
	 */
	if (gh_error == GH_ERROR_OK) {
		if (!ready)
			return 0;
	} else {
		dev_err(msgq->mbox.dev, "Failed to send data: %d (%d)\n", gh_error, msgq->last_ret);
	}

	/*
	 * We can send more messages. Mailbox framework requires that tx done
	 * happens asynchronously to sending the message. Gunyah message queues
	 * tell us right away on the hypercall return whether we can send more
	 * messages. To work around this, defer the txdone to a tasklet.
	 */
	tasklet_schedule(&msgq->txdone_tasklet);

	return 0;
}
  94. static struct mbox_chan_ops gh_msgq_ops = {
  95. .send_data = gh_msgq_send_data,
  96. };
  97. /**
  98. * gh_msgq_init() - Initialize a Gunyah message queue with an mbox_client
  99. * @parent: device parent used for the mailbox controller
  100. * @msgq: Pointer to the gh_msgq to initialize
  101. * @cl: A mailbox client to bind to the mailbox channel that the message queue creates
  102. * @tx_ghrsc: optional, the transmission side of the message queue
  103. * @rx_ghrsc: optional, the receiving side of the message queue
  104. *
  105. * At least one of tx_ghrsc and rx_ghrsc must be not NULL. Most message queue use cases come with
  106. * a pair of message queues to facilitate bidirectional communication. When tx_ghrsc is set,
  107. * the client can send messages with mbox_send_message(gh_msgq_chan(msgq), msg). When rx_ghrsc
  108. * is set, the mbox_client must register an .rx_callback() and the message queue driver will
  109. * deliver all available messages upon receiving the RX ready interrupt. The messages should be
  110. * consumed or copied by the client right away as the gh_msgq_rx_data will be replaced/destroyed
  111. * after the callback.
  112. *
  113. * Returns - 0 on success, negative otherwise
  114. */
  115. int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client *cl,
  116. struct gh_resource *tx_ghrsc, struct gh_resource *rx_ghrsc)
  117. {
  118. int ret;
  119. /* Must have at least a tx_ghrsc or rx_ghrsc and that they are the right device types */
  120. if ((!tx_ghrsc && !rx_ghrsc) ||
  121. (tx_ghrsc && tx_ghrsc->type != GH_RESOURCE_TYPE_MSGQ_TX) ||
  122. (rx_ghrsc && rx_ghrsc->type != GH_RESOURCE_TYPE_MSGQ_RX))
  123. return -EINVAL;
  124. msgq->mbox.dev = parent;
  125. msgq->mbox.ops = &gh_msgq_ops;
  126. msgq->mbox.num_chans = 1;
  127. msgq->mbox.txdone_irq = true;
  128. msgq->mbox.chans = &msgq->mbox_chan;
  129. ret = mbox_controller_register(&msgq->mbox);
  130. if (ret)
  131. return ret;
  132. ret = mbox_bind_client(gh_msgq_chan(msgq), cl);
  133. if (ret)
  134. goto err_mbox;
  135. if (tx_ghrsc) {
  136. msgq->tx_ghrsc = tx_ghrsc;
  137. ret = request_irq(msgq->tx_ghrsc->irq, gh_msgq_tx_irq_handler, 0, "gh_msgq_tx",
  138. msgq);
  139. if (ret)
  140. goto err_tx_ghrsc;
  141. enable_irq_wake(msgq->tx_ghrsc->irq);
  142. tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);
  143. }
  144. if (rx_ghrsc) {
  145. msgq->rx_ghrsc = rx_ghrsc;
  146. ret = request_threaded_irq(msgq->rx_ghrsc->irq, NULL, gh_msgq_rx_irq_handler,
  147. IRQF_ONESHOT, "gh_msgq_rx", msgq);
  148. if (ret)
  149. goto err_tx_irq;
  150. enable_irq_wake(msgq->rx_ghrsc->irq);
  151. }
  152. return 0;
  153. err_tx_irq:
  154. if (msgq->tx_ghrsc)
  155. free_irq(msgq->tx_ghrsc->irq, msgq);
  156. msgq->rx_ghrsc = NULL;
  157. err_tx_ghrsc:
  158. msgq->tx_ghrsc = NULL;
  159. err_mbox:
  160. mbox_controller_unregister(&msgq->mbox);
  161. return ret;
  162. }
  163. EXPORT_SYMBOL_GPL(gh_msgq_init);
  164. void gh_msgq_remove(struct gh_msgq *msgq)
  165. {
  166. mbox_free_channel(gh_msgq_chan(msgq));
  167. if (msgq->rx_ghrsc)
  168. free_irq(msgq->rx_ghrsc->irq, msgq);
  169. if (msgq->tx_ghrsc) {
  170. tasklet_kill(&msgq->txdone_tasklet);
  171. free_irq(msgq->tx_ghrsc->irq, msgq);
  172. }
  173. mbox_controller_unregister(&msgq->mbox);
  174. msgq->rx_ghrsc = NULL;
  175. msgq->tx_ghrsc = NULL;
  176. }
  177. EXPORT_SYMBOL_GPL(gh_msgq_remove);
  178. MODULE_LICENSE("GPL");
  179. MODULE_DESCRIPTION("Gunyah Message Queue Driver");