// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel SST generic IPC Support
 *
 * Copyright (C) 2015, Intel Corporation. All rights reserved.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <sound/asound.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"
#include "sst-ipc.h"

/* IPC message timeout (msecs) */
#define IPC_TIMEOUT_MSECS	300

#define IPC_EMPTY_LIST_SIZE	8

/* locks held by caller */
static struct ipc_message *msg_get_empty(struct sst_generic_ipc *ipc)
{
        struct ipc_message *msg = NULL;

        if (!list_empty(&ipc->empty_list)) {
                msg = list_first_entry(&ipc->empty_list, struct ipc_message,
                        list);
                list_del(&msg->list);
        }

        return msg;
}

/*
 * Block until the DSP completes @msg or IPC_TIMEOUT_MSECS expires, copy any
 * reply payload back into @reply, then recycle @msg onto the empty list.
 */
static int tx_wait_done(struct sst_generic_ipc *ipc,
        struct ipc_message *msg, struct sst_ipc_message *reply)
{
        unsigned long flags;
        int ret;

        /* wait for DSP completion (in all cases atm inc pending) */
        ret = wait_event_timeout(msg->waitq, msg->complete,
                msecs_to_jiffies(IPC_TIMEOUT_MSECS));

        spin_lock_irqsave(&ipc->dsp->spinlock, flags);
        if (ret == 0) {
                if (ipc->ops.shim_dbg != NULL)
                        ipc->ops.shim_dbg(ipc, "message timeout");

                list_del(&msg->list);
                ret = -ETIMEDOUT;
        } else {

                /* copy the data returned from DSP */
                if (reply) {
                        reply->header = msg->rx.header;
                        if (reply->data)
                                memcpy(reply->data, msg->rx.data, msg->rx.size);
                }
                ret = msg->errno;
        }

        list_add_tail(&msg->list, &ipc->empty_list);
        spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
        return ret;
}

/*
 * Grab a free message, fill it from @request, queue it on the TX list and
 * kick the TX worker. When @wait is set, block until the DSP replies or
 * the request times out.
 */
static int ipc_tx_message(struct sst_generic_ipc *ipc,
        struct sst_ipc_message request,
        struct sst_ipc_message *reply, int wait)
{
        struct ipc_message *msg;
        unsigned long flags;

        spin_lock_irqsave(&ipc->dsp->spinlock, flags);

        msg = msg_get_empty(ipc);
        if (msg == NULL) {
                spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
                return -EBUSY;
        }

        msg->tx.header = request.header;
        msg->tx.size = request.size;
        msg->rx.header = 0;
        msg->rx.size = reply ? reply->size : 0;
        msg->wait = wait;
        msg->errno = 0;
        msg->pending = false;
        msg->complete = false;

        if ((request.size) && (ipc->ops.tx_data_copy != NULL))
                ipc->ops.tx_data_copy(msg, request.data, request.size);

        list_add_tail(&msg->list, &ipc->tx_list);
        schedule_work(&ipc->kwork);

        spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);

        if (wait)
                return tx_wait_done(ipc, msg, reply);
        else
                return 0;
}

/* pre-allocate a fixed pool of messages with TX and RX data buffers */
static int msg_empty_list_init(struct sst_generic_ipc *ipc)
{
        int i;

        ipc->msg = kcalloc(IPC_EMPTY_LIST_SIZE, sizeof(struct ipc_message),
                           GFP_KERNEL);
        if (ipc->msg == NULL)
                return -ENOMEM;

        for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
                ipc->msg[i].tx.data = kzalloc(ipc->tx_data_max_size, GFP_KERNEL);
                if (ipc->msg[i].tx.data == NULL)
                        goto free_mem;

                ipc->msg[i].rx.data = kzalloc(ipc->rx_data_max_size, GFP_KERNEL);
                if (ipc->msg[i].rx.data == NULL) {
                        kfree(ipc->msg[i].tx.data);
                        goto free_mem;
                }

                init_waitqueue_head(&ipc->msg[i].waitq);
                list_add(&ipc->msg[i].list, &ipc->empty_list);
        }

        return 0;

free_mem:
        while (i > 0) {
                kfree(ipc->msg[i-1].tx.data);
                kfree(ipc->msg[i-1].rx.data);
                --i;
        }
        kfree(ipc->msg);

        return -ENOMEM;
}

/* work handler: drain the TX list to the DSP while it can accept messages */
static void ipc_tx_msgs(struct work_struct *work)
{
        struct sst_generic_ipc *ipc =
                container_of(work, struct sst_generic_ipc, kwork);
        struct ipc_message *msg;

        spin_lock_irq(&ipc->dsp->spinlock);

        while (!list_empty(&ipc->tx_list) && !ipc->pending) {
                /* if the DSP is busy, we will TX messages after IRQ.
                 * also postpone if we are in the middle of processing
                 * completion irq
                 */
                if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
                        dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
                        break;
                }

                msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
                list_move(&msg->list, &ipc->rx_list);

                if (ipc->ops.tx_msg != NULL)
                        ipc->ops.tx_msg(ipc, msg);
        }

        spin_unlock_irq(&ipc->dsp->spinlock);
}

int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc,
        struct sst_ipc_message request, struct sst_ipc_message *reply)
{
        int ret;

        /*
         * DSP may be in a low-power active state, so check whether the DSP
         * supports the lp-on method and, if so, invoke it before sending
         * the IPC.
         */
        if (ipc->ops.check_dsp_lp_on)
                if (ipc->ops.check_dsp_lp_on(ipc->dsp, true))
                        return -EIO;

        ret = ipc_tx_message(ipc, request, reply, 1);

        if (ipc->ops.check_dsp_lp_on)
                if (ipc->ops.check_dsp_lp_on(ipc->dsp, false))
                        return -EIO;

        return ret;
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);
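
/*
 * Example (illustrative sketch, not from this file): how a platform driver
 * might send a blocking request and read back a reply using the API above.
 * MY_IPC_SET_CONFIG, struct my_fw_config and the 64-byte reply buffer are
 * hypothetical; real drivers encode the header and size the buffers per
 * their own IPC protocol (see the tx_msg/tx_data_copy ops they install).
 *
 *	struct sst_ipc_message request = {0}, reply = {0};
 *	struct my_fw_config cfg = { .mode = 1 };    (hypothetical payload)
 *	u8 reply_buf[64];
 *	int ret;
 *
 *	request.header = MY_IPC_SET_CONFIG;
 *	request.data = &cfg;
 *	request.size = sizeof(cfg);
 *	reply.data = reply_buf;
 *	reply.size = sizeof(reply_buf);
 *
 *	ret = sst_ipc_tx_message_wait(ipc, request, &reply);
 *	if (ret < 0)
 *		dev_err(ipc->dev, "IPC request failed: %d\n", ret);
 */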

int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc,
        struct sst_ipc_message request)
{
        return ipc_tx_message(ipc, request, NULL, 0);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);

/* as sst_ipc_tx_message_wait(), but without the check_dsp_lp_on handling */
int sst_ipc_tx_message_nopm(struct sst_generic_ipc *ipc,
        struct sst_ipc_message request, struct sst_ipc_message *reply)
{
        return ipc_tx_message(ipc, request, reply, 1);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nopm);

/*
 * Match a reply @header against the messages outstanding on the RX list,
 * using the platform's reply_msg_match op (if any) to mask off the
 * reply-specific header bits.
 */
struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
        u64 header)
{
        struct ipc_message *msg;
        u64 mask;

        if (ipc->ops.reply_msg_match != NULL)
                header = ipc->ops.reply_msg_match(header, &mask);
        else
                mask = (u64)-1;

        if (list_empty(&ipc->rx_list)) {
                dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
                        header);
                return NULL;
        }

        list_for_each_entry(msg, &ipc->rx_list, list) {
                if ((msg->tx.header & mask) == header)
                        return msg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(sst_ipc_reply_find_msg);

/* locks held by caller */
void sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc,
        struct ipc_message *msg)
{
        msg->complete = true;

        if (!msg->wait)
                list_add_tail(&msg->list, &ipc->empty_list);
        else
                wake_up(&msg->waitq);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_msg_reply_complete);
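
/*
 * Example (illustrative sketch, not from this file): how a platform IRQ
 * handler might complete an outstanding request once the DSP raises a
 * reply. sst_ipc_tx_msg_reply_complete() expects the caller to hold the
 * locks (see the comment above), so the lookup/complete pair is shown
 * under ipc->dsp->spinlock; reading the reply header and copying any
 * inbox payload into msg->rx.data is platform-specific and only hinted at.
 *
 *	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
 *
 *	msg = sst_ipc_reply_find_msg(ipc, header);
 *	if (msg) {
 *		msg->rx.header = header;
 *		msg->errno = 0;    (or an error decoded from the reply)
 *		sst_ipc_tx_msg_reply_complete(ipc, msg);
 *	}
 *
 *	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 */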

int sst_ipc_init(struct sst_generic_ipc *ipc)
{
        int ret;

        INIT_LIST_HEAD(&ipc->tx_list);
        INIT_LIST_HEAD(&ipc->rx_list);
        INIT_LIST_HEAD(&ipc->empty_list);
        init_waitqueue_head(&ipc->wait_txq);

        ret = msg_empty_list_init(ipc);
        if (ret < 0)
                return -ENOMEM;

        INIT_WORK(&ipc->kwork, ipc_tx_msgs);
        return 0;
}
EXPORT_SYMBOL_GPL(sst_ipc_init);
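
/*
 * Example (illustrative sketch, not from this file): the fields a platform
 * driver typically fills in before calling sst_ipc_init(). The ops named
 * here are the ones consumed in this file; the my_*() callbacks and the
 * SZ_1K buffer sizes are placeholders.
 *
 *	ipc->dev = dev;
 *	ipc->dsp = dsp;
 *	ipc->tx_data_max_size = SZ_1K;
 *	ipc->rx_data_max_size = SZ_1K;
 *
 *	ipc->ops.tx_msg = my_tx_msg;
 *	ipc->ops.tx_data_copy = my_tx_data_copy;
 *	ipc->ops.reply_msg_match = my_reply_msg_match;
 *	ipc->ops.is_dsp_busy = my_is_dsp_busy;
 *
 *	ret = sst_ipc_init(ipc);
 *	if (ret < 0)
 *		return ret;
 */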

/* cancel pending TX work and free the message pool allocated by sst_ipc_init() */
void sst_ipc_fini(struct sst_generic_ipc *ipc)
{
        int i;

        cancel_work_sync(&ipc->kwork);

        if (ipc->msg) {
                for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
                        kfree(ipc->msg[i].tx.data);
                        kfree(ipc->msg[i].rx.data);
                }
                kfree(ipc->msg);
        }
}
EXPORT_SYMBOL_GPL(sst_ipc_fini);

/* Module information */
MODULE_AUTHOR("Jin Yao");
MODULE_DESCRIPTION("Intel SST IPC generic");
MODULE_LICENSE("GPL v2");