ops.h 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595
  1. /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
  2. /*
  3. * This file is provided under a dual BSD/GPLv2 license. When using or
  4. * redistributing this file, you may do so under either license.
  5. *
  6. * Copyright(c) 2018 Intel Corporation. All rights reserved.
  7. *
  8. * Author: Liam Girdwood <[email protected]>
  9. */
  10. #ifndef __SOUND_SOC_SOF_IO_H
  11. #define __SOUND_SOC_SOF_IO_H
  12. #include <linux/device.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/kernel.h>
  15. #include <linux/types.h>
  16. #include <sound/pcm.h>
  17. #include "sof-priv.h"
/* Shorthand for the platform DSP ops table attached to the SOF device */
#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)
  20. static inline int sof_ops_init(struct snd_sof_dev *sdev)
  21. {
  22. if (sdev->pdata->desc->ops_init)
  23. return sdev->pdata->desc->ops_init(sdev);
  24. return 0;
  25. }
  26. static inline void sof_ops_free(struct snd_sof_dev *sdev)
  27. {
  28. if (sdev->pdata->desc->ops_free)
  29. sdev->pdata->desc->ops_free(sdev);
  30. }
  31. /* Mandatory operations are verified during probing */
  32. /* init */
/* Probe the DSP hardware. Mandatory op, verified during probing. */
static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}
  37. static inline int snd_sof_remove(struct snd_sof_dev *sdev)
  38. {
  39. if (sof_ops(sdev)->remove)
  40. return sof_ops(sdev)->remove(sdev);
  41. return 0;
  42. }
  43. static inline int snd_sof_shutdown(struct snd_sof_dev *sdev)
  44. {
  45. if (sof_ops(sdev)->shutdown)
  46. return sof_ops(sdev)->shutdown(sdev);
  47. return 0;
  48. }
  49. /* control */
  50. /*
  51. * snd_sof_dsp_run returns the core mask of the cores that are available
  52. * after successful fw boot
  53. */
/*
 * Start the DSP. Mandatory op; returns the core mask of the cores
 * available after successful fw boot.
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}
  58. static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
  59. {
  60. if (sof_ops(sdev)->stall)
  61. return sof_ops(sdev)->stall(sdev, core_mask);
  62. return 0;
  63. }
  64. static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
  65. {
  66. if (sof_ops(sdev)->reset)
  67. return sof_ops(sdev)->reset(sdev);
  68. return 0;
  69. }
  70. /* dsp core get/put */
  71. static inline int snd_sof_dsp_core_get(struct snd_sof_dev *sdev, int core)
  72. {
  73. if (core > sdev->num_cores - 1) {
  74. dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
  75. sdev->num_cores);
  76. return -EINVAL;
  77. }
  78. if (sof_ops(sdev)->core_get) {
  79. int ret;
  80. /* if current ref_count is > 0, increment it and return */
  81. if (sdev->dsp_core_ref_count[core] > 0) {
  82. sdev->dsp_core_ref_count[core]++;
  83. return 0;
  84. }
  85. /* power up the core */
  86. ret = sof_ops(sdev)->core_get(sdev, core);
  87. if (ret < 0)
  88. return ret;
  89. /* increment ref_count */
  90. sdev->dsp_core_ref_count[core]++;
  91. /* and update enabled_cores_mask */
  92. sdev->enabled_cores_mask |= BIT(core);
  93. dev_dbg(sdev->dev, "Core %d powered up\n", core);
  94. }
  95. return 0;
  96. }
  97. static inline int snd_sof_dsp_core_put(struct snd_sof_dev *sdev, int core)
  98. {
  99. if (core > sdev->num_cores - 1) {
  100. dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
  101. sdev->num_cores);
  102. return -EINVAL;
  103. }
  104. if (sof_ops(sdev)->core_put) {
  105. int ret;
  106. /* decrement ref_count and return if it is > 0 */
  107. if (--(sdev->dsp_core_ref_count[core]) > 0)
  108. return 0;
  109. /* power down the core */
  110. ret = sof_ops(sdev)->core_put(sdev, core);
  111. if (ret < 0)
  112. return ret;
  113. /* and update enabled_cores_mask */
  114. sdev->enabled_cores_mask &= ~BIT(core);
  115. dev_dbg(sdev->dev, "Core %d powered down\n", core);
  116. }
  117. return 0;
  118. }
  119. /* pre/post fw load */
  120. static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
  121. {
  122. if (sof_ops(sdev)->pre_fw_run)
  123. return sof_ops(sdev)->pre_fw_run(sdev);
  124. return 0;
  125. }
  126. static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
  127. {
  128. if (sof_ops(sdev)->post_fw_run)
  129. return sof_ops(sdev)->post_fw_run(sdev);
  130. return 0;
  131. }
  132. /* parse platform specific extended manifest */
  133. static inline int snd_sof_dsp_parse_platform_ext_manifest(struct snd_sof_dev *sdev,
  134. const struct sof_ext_man_elem_header *hdr)
  135. {
  136. if (sof_ops(sdev)->parse_platform_ext_manifest)
  137. return sof_ops(sdev)->parse_platform_ext_manifest(sdev, hdr);
  138. return 0;
  139. }
  140. /* misc */
  141. /**
  142. * snd_sof_dsp_get_bar_index - Maps a section type with a BAR index
  143. *
  144. * @sdev: sof device
  145. * @type: section type as described by snd_sof_fw_blk_type
  146. *
  147. * Returns the corresponding BAR index (a positive integer) or -EINVAL
  148. * in case there is no mapping
  149. */
  150. static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
  151. {
  152. if (sof_ops(sdev)->get_bar_index)
  153. return sof_ops(sdev)->get_bar_index(sdev, type);
  154. return sdev->mmio_bar;
  155. }
  156. static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
  157. {
  158. if (sof_ops(sdev)->get_mailbox_offset)
  159. return sof_ops(sdev)->get_mailbox_offset(sdev);
  160. dev_err(sdev->dev, "error: %s not defined\n", __func__);
  161. return -ENOTSUPP;
  162. }
  163. static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
  164. u32 id)
  165. {
  166. if (sof_ops(sdev)->get_window_offset)
  167. return sof_ops(sdev)->get_window_offset(sdev, id);
  168. dev_err(sdev->dev, "error: %s not defined\n", __func__);
  169. return -ENOTSUPP;
  170. }
  171. /* power management */
  172. static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
  173. {
  174. if (sof_ops(sdev)->resume)
  175. return sof_ops(sdev)->resume(sdev);
  176. return 0;
  177. }
  178. static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
  179. u32 target_state)
  180. {
  181. if (sof_ops(sdev)->suspend)
  182. return sof_ops(sdev)->suspend(sdev, target_state);
  183. return 0;
  184. }
  185. static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
  186. {
  187. if (sof_ops(sdev)->runtime_resume)
  188. return sof_ops(sdev)->runtime_resume(sdev);
  189. return 0;
  190. }
  191. static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
  192. {
  193. if (sof_ops(sdev)->runtime_suspend)
  194. return sof_ops(sdev)->runtime_suspend(sdev);
  195. return 0;
  196. }
  197. static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
  198. {
  199. if (sof_ops(sdev)->runtime_idle)
  200. return sof_ops(sdev)->runtime_idle(sdev);
  201. return 0;
  202. }
  203. static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
  204. {
  205. if (sof_ops(sdev)->set_hw_params_upon_resume)
  206. return sof_ops(sdev)->set_hw_params_upon_resume(sdev);
  207. return 0;
  208. }
  209. static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
  210. {
  211. if (sof_ops(sdev)->set_clk)
  212. return sof_ops(sdev)->set_clk(sdev, freq);
  213. return 0;
  214. }
/*
 * Transition the DSP to @target_state via the optional set_power_state op.
 * Calls are serialized with the power_state_access mutex so concurrent
 * callers cannot interleave power-state changes.
 */
static inline int
snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	mutex_lock(&sdev->power_state_access);

	/* success when the platform provides no set_power_state op */
	if (sof_ops(sdev)->set_power_state)
		ret = sof_ops(sdev)->set_power_state(sdev, target_state);

	mutex_unlock(&sdev->power_state_access);

	return ret;
}
  226. /* debug */
  227. void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags);
  228. static inline int snd_sof_debugfs_add_region_item(struct snd_sof_dev *sdev,
  229. enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
  230. const char *name, enum sof_debugfs_access_type access_type)
  231. {
  232. if (sof_ops(sdev) && sof_ops(sdev)->debugfs_add_region_item)
  233. return sof_ops(sdev)->debugfs_add_region_item(sdev, blk_type, offset,
  234. size, name, access_type);
  235. return 0;
  236. }
  237. /* register IO */
  238. static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
  239. u32 offset, u32 value)
  240. {
  241. if (sof_ops(sdev)->write) {
  242. sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
  243. return;
  244. }
  245. dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
  246. }
  247. static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
  248. u32 offset, u64 value)
  249. {
  250. if (sof_ops(sdev)->write64) {
  251. sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
  252. return;
  253. }
  254. dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
  255. }
  256. static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
  257. u32 offset)
  258. {
  259. if (sof_ops(sdev)->read)
  260. return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);
  261. dev_err(sdev->dev, "error: %s not defined\n", __func__);
  262. return -ENOTSUPP;
  263. }
  264. static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
  265. u32 offset)
  266. {
  267. if (sof_ops(sdev)->read64)
  268. return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);
  269. dev_err(sdev->dev, "error: %s not defined\n", __func__);
  270. return -ENOTSUPP;
  271. }
  272. /* block IO */
/* Read @bytes from DSP memory of @blk_type at @offset into @dest. Mandatory op. */
static inline int snd_sof_dsp_block_read(struct snd_sof_dev *sdev,
					 enum snd_sof_fw_blk_type blk_type,
					 u32 offset, void *dest, size_t bytes)
{
	return sof_ops(sdev)->block_read(sdev, blk_type, offset, dest, bytes);
}
/* Write @bytes from @src to DSP memory of @blk_type at @offset. Mandatory op. */
static inline int snd_sof_dsp_block_write(struct snd_sof_dev *sdev,
					  enum snd_sof_fw_blk_type blk_type,
					  u32 offset, void *src, size_t bytes)
{
	return sof_ops(sdev)->block_write(sdev, blk_type, offset, src, bytes);
}
  285. /* mailbox IO */
  286. static inline void snd_sof_dsp_mailbox_read(struct snd_sof_dev *sdev,
  287. u32 offset, void *dest, size_t bytes)
  288. {
  289. if (sof_ops(sdev)->mailbox_read)
  290. sof_ops(sdev)->mailbox_read(sdev, offset, dest, bytes);
  291. }
  292. static inline void snd_sof_dsp_mailbox_write(struct snd_sof_dev *sdev,
  293. u32 offset, void *src, size_t bytes)
  294. {
  295. if (sof_ops(sdev)->mailbox_write)
  296. sof_ops(sdev)->mailbox_write(sdev, offset, src, bytes);
  297. }
  298. /* ipc */
/* Send an IPC message to the DSP. Mandatory op. */
static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}
  304. /* host PCM ops */
  305. static inline int
  306. snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
  307. struct snd_pcm_substream *substream)
  308. {
  309. if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
  310. return sof_ops(sdev)->pcm_open(sdev, substream);
  311. return 0;
  312. }
  313. /* disconnect pcm substream to a host stream */
  314. static inline int
  315. snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
  316. struct snd_pcm_substream *substream)
  317. {
  318. if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
  319. return sof_ops(sdev)->pcm_close(sdev, substream);
  320. return 0;
  321. }
  322. /* host stream hw params */
  323. static inline int
  324. snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
  325. struct snd_pcm_substream *substream,
  326. struct snd_pcm_hw_params *params,
  327. struct snd_sof_platform_stream_params *platform_params)
  328. {
  329. if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
  330. return sof_ops(sdev)->pcm_hw_params(sdev, substream, params,
  331. platform_params);
  332. return 0;
  333. }
  334. /* host stream hw free */
  335. static inline int
  336. snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
  337. struct snd_pcm_substream *substream)
  338. {
  339. if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
  340. return sof_ops(sdev)->pcm_hw_free(sdev, substream);
  341. return 0;
  342. }
  343. /* host stream trigger */
  344. static inline int
  345. snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
  346. struct snd_pcm_substream *substream, int cmd)
  347. {
  348. if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
  349. return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);
  350. return 0;
  351. }
  352. /* Firmware loading */
/* Load the DSP firmware image. Mandatory op. */
static inline int snd_sof_load_firmware(struct snd_sof_dev *sdev)
{
	dev_dbg(sdev->dev, "loading firmware\n");

	return sof_ops(sdev)->load_firmware(sdev);
}
  358. /* host DSP message data */
/* Copy @sz bytes of DSP message data for @substream into @p. Mandatory op. */
static inline int snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
				       struct snd_pcm_substream *substream,
				       void *p, size_t sz)
{
	return sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
}
  365. /* host side configuration of the stream's data offset in stream mailbox area */
  366. static inline int
  367. snd_sof_set_stream_data_offset(struct snd_sof_dev *sdev,
  368. struct snd_pcm_substream *substream,
  369. size_t posn_offset)
  370. {
  371. if (sof_ops(sdev) && sof_ops(sdev)->set_stream_data_offset)
  372. return sof_ops(sdev)->set_stream_data_offset(sdev, substream,
  373. posn_offset);
  374. return 0;
  375. }
  376. /* host stream pointer */
  377. static inline snd_pcm_uframes_t
  378. snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
  379. struct snd_pcm_substream *substream)
  380. {
  381. if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
  382. return sof_ops(sdev)->pcm_pointer(sdev, substream);
  383. return 0;
  384. }
  385. /* pcm ack */
  386. static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
  387. struct snd_pcm_substream *substream)
  388. {
  389. if (sof_ops(sdev) && sof_ops(sdev)->pcm_ack)
  390. return sof_ops(sdev)->pcm_ack(sdev, substream);
  391. return 0;
  392. }
  393. /* machine driver */
  394. static inline int
  395. snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
  396. {
  397. if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
  398. return sof_ops(sdev)->machine_register(sdev, pdata);
  399. return 0;
  400. }
  401. static inline void
  402. snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
  403. {
  404. if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
  405. sof_ops(sdev)->machine_unregister(sdev, pdata);
  406. }
  407. static inline struct snd_soc_acpi_mach *
  408. snd_sof_machine_select(struct snd_sof_dev *sdev)
  409. {
  410. if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
  411. return sof_ops(sdev)->machine_select(sdev);
  412. return NULL;
  413. }
  414. static inline void
  415. snd_sof_set_mach_params(struct snd_soc_acpi_mach *mach,
  416. struct snd_sof_dev *sdev)
  417. {
  418. if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
  419. sof_ops(sdev)->set_mach_params(mach, sdev);
  420. }
/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a DSP register
 * until a condition is met or a timeout occurs
 * @sdev: SOF device
 * @bar: BAR index passed to snd_sof_dsp_read()
 * @offset: Register offset within @bar to poll
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 * tight-loops). Should be less than ~20ms since usleep_range
 * is used (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value at @offset is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
/* Statement-expression macro; evaluates to 0 or -ETIMEDOUT (kernel-doc above). */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) { \
		(val) = snd_sof_dsp_read(sdev, bar, offset); \
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x successful\n", \
				(offset), (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x timedout\n", \
				(offset), (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
/* This is for registers bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

/* Read-modify-write helpers, in locked and unlocked flavors (see ops.c) */
bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);
bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);
bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);
bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);
void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);

/* Presumably polls until the masked register matches @target — see ops.c */
int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

/* Handle a DSP panic notification at @offset */
void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable);
  483. #endif