// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-sst-dsp.c - SKL SST library generic function
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <[email protected]>
 *	   Jeeja KP <[email protected]>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <sound/pcm.h>

#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "skl.h"

/* various timeout values */
#define SKL_DSP_PU_TO		50
#define SKL_DSP_PD_TO		50
#define SKL_DSP_RESET_TO	50
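
/* Set the driver's DSP state under the context mutex */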
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
{
	mutex_lock(&ctx->mutex);
	ctx->sst_state = state;
	mutex_unlock(&ctx->mutex);
}

/*
 * Initialize core power state and usage count. To be called after a
 * successful first boot: core 0 will then be running and the other
 * cores held in reset.
 */
void skl_dsp_init_core_state(struct sst_dsp *ctx)
{
	struct skl_dev *skl = ctx->thread_context;
	int i;

	skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
	skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;

	for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) {
		skl->cores.state[i] = SKL_DSP_RESET;
		skl->cores.usage_count[i] = 0;
	}
}

/* Get the mask for all enabled cores */
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx)
{
	struct skl_dev *skl = ctx->thread_context;
	unsigned int core_mask, en_cores_mask;
	u32 val;

	core_mask = SKL_DSP_CORES_MASK(skl->cores.count);

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

	/* Cores having CPA bit set */
	en_cores_mask = (val & SKL_ADSPCS_CPA_MASK(core_mask)) >>
			SKL_ADSPCS_CPA_SHIFT;

	/* And cores having CRST bit cleared */
	en_cores_mask &= (~val & SKL_ADSPCS_CRST_MASK(core_mask)) >>
			SKL_ADSPCS_CRST_SHIFT;

	/* And cores having CSTALL bit cleared */
	en_cores_mask &= (~val & SKL_ADSPCS_CSTALL_MASK(core_mask)) >>
			SKL_ADSPCS_CSTALL_SHIFT;

	en_cores_mask &= core_mask;

	dev_dbg(ctx->dev, "DSP enabled cores mask = %x\n", en_cores_mask);

	return en_cores_mask;
}
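
/*
 * Assert CRST for the cores in core_mask and poll ADSPCS until the
 * reset bits read back as set.
 */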
static int
skl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_ADSPCS_CRST_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_DSP_RESET_TO,
			"Set reset");
	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
				SKL_ADSPCS_CRST_MASK(core_mask)) !=
				SKL_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(ctx->dev, "Set reset state failed: core_mask %x\n",
							core_mask);
		ret = -EIO;
	}

	return ret;
}
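
/*
 * Clear CRST for the cores in core_mask and poll ADSPCS until the
 * cores have actually left reset.
 */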
int skl_dsp_core_unset_reset_state(
		struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	dev_dbg(ctx->dev, "In %s\n", __func__);

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
				SKL_ADSPCS_CRST_MASK(core_mask), 0);

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CRST_MASK(core_mask),
			0,
			SKL_DSP_RESET_TO,
			"Unset reset");

	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
		 SKL_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(ctx->dev, "Unset reset state failed: core_mask %x\n",
				core_mask);
		ret = -EIO;
	}

	return ret;
}
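
/*
 * A core is considered enabled when it is powered (SPA requested and
 * CPA confirmed) and is neither held in reset nor stalled.
 */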
static bool
is_skl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

	is_enable = ((val & SKL_ADSPCS_CPA_MASK(core_mask)) &&
			(val & SKL_ADSPCS_SPA_MASK(core_mask)) &&
			!(val & SKL_ADSPCS_CRST_MASK(core_mask)) &&
			!(val & SKL_ADSPCS_CSTALL_MASK(core_mask)));

	dev_dbg(ctx->dev, "DSP core(s) enabled? %d : core_mask %x\n",
			is_enable, core_mask);

	return is_enable;
}
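
/* Stall the cores in core_mask, then put them into reset */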
static int skl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* stall core */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CSTALL_MASK(core_mask),
			SKL_ADSPCS_CSTALL_MASK(core_mask));

	/* set reset state */
	return skl_dsp_core_set_reset_state(ctx, core_mask);
}
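
/*
 * Take the cores in core_mask out of reset and unstall them, then
 * verify that they report as enabled.
 */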
int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* unset reset state */
	ret = skl_dsp_core_unset_reset_state(ctx, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(ctx->dev, "unstall/run core: core_mask = %x\n", core_mask);
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CSTALL_MASK(core_mask), 0);

	if (!is_skl_dsp_core_enable(ctx, core_mask)) {
		skl_dsp_reset_core(ctx, core_mask);
		dev_err(ctx->dev, "DSP start core failed: core_mask %x\n",
							core_mask);
		ret = -EIO;
	}

	return ret;
}
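
/* Request power (SPA) for the cores in core_mask and poll for CPA */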
int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_SPA_MASK(core_mask),
			SKL_ADSPCS_SPA_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CPA_MASK(core_mask),
			SKL_ADSPCS_CPA_MASK(core_mask),
			SKL_DSP_PU_TO,
			"Power up");

	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
			SKL_ADSPCS_CPA_MASK(core_mask)) !=
			SKL_ADSPCS_CPA_MASK(core_mask)) {
		dev_err(ctx->dev, "DSP core power up failed: core_mask %x\n",
				core_mask);
		ret = -EIO;
	}

	return ret;
}
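
/* Drop the power request (SPA) and poll until CPA is cleared */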
int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
				SKL_ADSPCS_SPA_MASK(core_mask), 0);

	/* poll with timeout to check if operation successful */
	return sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CPA_MASK(core_mask),
			0,
			SKL_DSP_PD_TO,
			"Power down");
}
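
/* Power up the cores in core_mask and start them running */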
int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* power up */
	ret = skl_dsp_core_power_up(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core power up failed: core_mask %x\n",
							core_mask);
		return ret;
	}

	return skl_dsp_start_core(ctx, core_mask);
}
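
/* Reset, then power down the cores in core_mask */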
int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	ret = skl_dsp_reset_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core reset failed: core_mask %x\n",
							core_mask);
		return ret;
	}

	/* power down core */
	ret = skl_dsp_core_power_down(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core power down fail mask %x: %d\n",
			core_mask, ret);
		return ret;
	}

	if (is_skl_dsp_core_enable(ctx, core_mask)) {
		dev_err(ctx->dev, "dsp core disable fail mask %x: %d\n",
				core_mask, ret);
		ret = -EIO;
	}

	return ret;
}
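
/*
 * Boot core 0: if it is already enabled, cycle it through reset and
 * start; otherwise disable it first and then enable it from scratch.
 */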
int skl_dsp_boot(struct sst_dsp *ctx)
{
	int ret;

	if (is_skl_dsp_core_enable(ctx, SKL_DSP_CORE0_MASK)) {
		ret = skl_dsp_reset_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 reset fail: %d\n", ret);
			return ret;
		}

		ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 start fail: %d\n", ret);
			return ret;
		}
	} else {
		ret = skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 disable fail: %d\n", ret);
			return ret;
		}
		ret = skl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
	}

	return ret;
}
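
/*
 * Top-half interrupt handler: read ADSPIS, mask the reported IPC or
 * code-loader DMA interrupt source and defer the work to the IRQ
 * thread. An all-ones read is treated as "device not accessible".
 */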
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
{
	struct sst_dsp *ctx = dev_id;
	u32 val;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&ctx->spinlock);

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
	ctx->intr_status = val;

	if (val == 0xffffffff) {
		spin_unlock(&ctx->spinlock);
		return IRQ_NONE;
	}

	if (val & SKL_ADSPIS_IPC) {
		skl_ipc_int_disable(ctx);
		result = IRQ_WAKE_THREAD;
	}

	if (val & SKL_ADSPIS_CL_DMA) {
		skl_cldma_int_disable(ctx);
		result = IRQ_WAKE_THREAD;
	}

	spin_unlock(&ctx->spinlock);

	return result;
}

/*
 * skl_dsp_get_core/skl_dsp_put_core will be called inside DAPM context
 * within the dapm mutex. Hence no separate lock is used.
 */
int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *skl = ctx->thread_context;
	int ret = 0;

	if (core_id >= skl->cores.count) {
		dev_err(ctx->dev, "invalid core id: %d\n", core_id);
		return -EINVAL;
	}

	skl->cores.usage_count[core_id]++;

	if (skl->cores.state[core_id] == SKL_DSP_RESET) {
		ret = ctx->fw_ops.set_state_D0(ctx, core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to get core%d\n", core_id);
			goto out;
		}
	}

out:
	dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
			core_id, skl->cores.state[core_id],
			skl->cores.usage_count[core_id]);

	return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_get_core);

int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *skl = ctx->thread_context;
	int ret = 0;

	if (core_id >= skl->cores.count) {
		dev_err(ctx->dev, "invalid core id: %d\n", core_id);
		return -EINVAL;
	}

	if ((--skl->cores.usage_count[core_id] == 0) &&
		(skl->cores.state[core_id] != SKL_DSP_RESET)) {
		ret = ctx->fw_ops.set_state_D3(ctx, core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to put core %d: %d\n",
					core_id, ret);
			skl->cores.usage_count[core_id]++;
		}
	}

	dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
			core_id, skl->cores.state[core_id],
			skl->cores.usage_count[core_id]);

	return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_put_core);
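
/* Wake/sleep map to taking or releasing a reference on core 0 */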
int skl_dsp_wake(struct sst_dsp *ctx)
{
	return skl_dsp_get_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);

int skl_dsp_sleep(struct sst_dsp *ctx)
{
	return skl_dsp_put_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);
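
/*
 * Allocate and initialise the sst_dsp context, then run the
 * platform-specific init callback if one is provided.
 */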
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
		struct sst_dsp_device *sst_dev, int irq)
{
	int ret;
	struct sst_dsp *sst;

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->sst_dev = sst_dev;
	sst->irq = irq;
	sst->ops = sst_dev->ops;
	sst->thread_context = sst_dev->thread_context;

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		ret = sst->ops->init(sst);
		if (ret < 0)
			return NULL;
	}

	return sst;
}
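
/* Request the shared, threaded AudioDSP interrupt line */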
int skl_dsp_acquire_irq(struct sst_dsp *sst)
{
	struct sst_dsp_device *sst_dev = sst->sst_dev;
	int ret;

	/* Register the ISR */
	ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (ret)
		dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
			       sst->irq);

	return ret;
}
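
/* Disable interrupts, release the IRQ and power down core 0 */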
void skl_dsp_free(struct sst_dsp *dsp)
{
	skl_ipc_int_disable(dsp);

	free_irq(dsp->irq, dsp);
	skl_ipc_op_int_disable(dsp);
	skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);

bool is_skl_dsp_running(struct sst_dsp *ctx)
{
	return (ctx->sst_state == SKL_DSP_RUNNING);
}
EXPORT_SYMBOL_GPL(is_skl_dsp_running);