ipc3-dtrace.c

// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2022 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <[email protected]>

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "sof-audio.h"
#include "ops.h"
#include "sof-utils.h"
#include "ipc3-priv.h"

#define TRACE_FILTER_ELEMENTS_PER_ENTRY 4
#define TRACE_FILTER_MAX_CONFIG_STRING_LENGTH 1024

enum sof_dtrace_state {
        SOF_DTRACE_DISABLED,
        SOF_DTRACE_STOPPED,
        SOF_DTRACE_INITIALIZING,
        SOF_DTRACE_ENABLED,
};

struct sof_dtrace_priv {
        struct snd_dma_buffer dmatb;    /* trace data buffer */
        struct snd_dma_buffer dmatp;    /* trace page table buffer */
        int dma_trace_pages;
        wait_queue_head_t trace_sleep;
        u32 host_offset;
        bool dtrace_error;
        bool dtrace_draining;
        enum sof_dtrace_state dtrace_state;
};

static bool trace_pos_update_expected(struct sof_dtrace_priv *priv)
{
        if (priv->dtrace_state == SOF_DTRACE_ENABLED ||
            priv->dtrace_state == SOF_DTRACE_INITIALIZING)
                return true;

        return false;
}

static int trace_filter_append_elem(struct snd_sof_dev *sdev, u32 key, u32 value,
                                    struct sof_ipc_trace_filter_elem *elem_list,
                                    int capacity, int *counter)
{
        if (*counter >= capacity)
                return -ENOMEM;

        elem_list[*counter].key = key;
        elem_list[*counter].value = value;
        ++*counter;

        return 0;
}

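/*
 * Parse a single filter entry of the form
 * "<log_level> <uuid_hex> <pipe_id> <comp_id>" and append the matching IPC
 * filter elements to @elem. Returns the length of the parsed line on
 * success, or a negative error code.
 */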
static int trace_filter_parse_entry(struct snd_sof_dev *sdev, const char *line,
                                    struct sof_ipc_trace_filter_elem *elem,
                                    int capacity, int *counter)
{
        int log_level, pipe_id, comp_id, read, ret;
        int len = strlen(line);
        int cnt = *counter;
        u32 uuid_id;

        /* ignore empty content */
        ret = sscanf(line, " %n", &read);
        if (!ret && read == len)
                return len;

        ret = sscanf(line, " %d %x %d %d %n", &log_level, &uuid_id, &pipe_id, &comp_id, &read);
        if (ret != TRACE_FILTER_ELEMENTS_PER_ENTRY || read != len) {
                dev_err(sdev->dev, "Invalid trace filter entry '%s'\n", line);
                return -EINVAL;
        }

        if (uuid_id > 0) {
                ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_UUID,
                                               uuid_id, elem, capacity, &cnt);
                if (ret)
                        return ret;
        }
        if (pipe_id >= 0) {
                ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE,
                                               pipe_id, elem, capacity, &cnt);
                if (ret)
                        return ret;
        }
        if (comp_id >= 0) {
                ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_COMP,
                                               comp_id, elem, capacity, &cnt);
                if (ret)
                        return ret;
        }

        ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
                                       SOF_IPC_TRACE_FILTER_ELEM_FIN,
                                       log_level, elem, capacity, &cnt);
        if (ret)
                return ret;

        /* update counter only when parsing whole entry passed */
        *counter = cnt;

        return len;
}

static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
                              int *out_elem_cnt,
                              struct sof_ipc_trace_filter_elem **out)
{
        static const char entry_delimiter[] = ";";
        char *entry = string;
        int capacity = 0;
        int entry_len;
        int cnt = 0;

        /*
         * Each entry contains at least 1, up to TRACE_FILTER_ELEMENTS_PER_ENTRY
         * IPC elements, depending on content. Calculate IPC elements capacity
         * for the input string where each element is set.
         */
        while (entry) {
                capacity += TRACE_FILTER_ELEMENTS_PER_ENTRY;
                entry = strchr(entry + 1, entry_delimiter[0]);
        }

        *out = kmalloc(capacity * sizeof(**out), GFP_KERNEL);
        if (!*out)
                return -ENOMEM;

        /* split input string by ';', and parse each entry separately in trace_filter_parse_entry */
        while ((entry = strsep(&string, entry_delimiter))) {
                entry_len = trace_filter_parse_entry(sdev, entry, *out, capacity, &cnt);
                if (entry_len < 0) {
                        dev_err(sdev->dev,
                                "Parsing filter entry '%s' failed with %d\n",
                                entry, entry_len);
                        return -EINVAL;
                }
        }

        *out_elem_cnt = cnt;

        return 0;
}

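/*
 * Send a SOF_IPC_TRACE_FILTER_UPDATE message carrying the parsed filter
 * elements to the DSP, resuming the device if it is runtime suspended.
 */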
static int ipc3_trace_update_filter(struct snd_sof_dev *sdev, int num_elems,
                                    struct sof_ipc_trace_filter_elem *elems)
{
        struct sof_ipc_trace_filter *msg;
        struct sof_ipc_reply reply;
        size_t size;
        int ret;

        size = struct_size(msg, elems, num_elems);
        if (size > SOF_IPC_MSG_MAX_SIZE)
                return -EINVAL;

        msg = kmalloc(size, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        msg->hdr.size = size;
        msg->hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_FILTER_UPDATE;
        msg->elem_cnt = num_elems;
        memcpy(&msg->elems[0], elems, num_elems * sizeof(*elems));

        ret = pm_runtime_resume_and_get(sdev->dev);
        if (ret < 0 && ret != -EACCES) {
                dev_err(sdev->dev, "enabling device failed: %d\n", ret);
                goto error;
        }

        ret = sof_ipc_tx_message(sdev->ipc, msg, msg->hdr.size, &reply, sizeof(reply));
        pm_runtime_mark_last_busy(sdev->dev);
        pm_runtime_put_autosuspend(sdev->dev);

error:
        kfree(msg);
        return ret ? ret : reply.error;
}

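/*
 * debugfs "filter" write handler: the user writes one or more ';'-separated
 * entries, each parsed by trace_filter_parse_entry(), and the resulting
 * element list is sent to the DSP.
 */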
static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user *from,
                                           size_t count, loff_t *ppos)
{
        struct snd_sof_dfsentry *dfse = file->private_data;
        struct sof_ipc_trace_filter_elem *elems = NULL;
        struct snd_sof_dev *sdev = dfse->sdev;
        int num_elems;
        char *string;
        int ret;

        if (count > TRACE_FILTER_MAX_CONFIG_STRING_LENGTH) {
                dev_err(sdev->dev, "%s too long input, %zu > %d\n", __func__, count,
                        TRACE_FILTER_MAX_CONFIG_STRING_LENGTH);
                return -EINVAL;
        }

        string = kmalloc(count + 1, GFP_KERNEL);
        if (!string)
                return -ENOMEM;

        if (copy_from_user(string, from, count)) {
                ret = -EFAULT;
                goto error;
        }
        string[count] = '\0';

        ret = trace_filter_parse(sdev, string, &num_elems, &elems);
        if (ret < 0)
                goto error;

        if (num_elems) {
                ret = ipc3_trace_update_filter(sdev, num_elems, elems);
                if (ret < 0) {
                        dev_err(sdev->dev, "Filter update failed: %d\n", ret);
                        goto error;
                }
        }
        ret = count;

error:
        kfree(string);
        kfree(elems);
        return ret;
}

static const struct file_operations sof_dfs_trace_filter_fops = {
        .open = simple_open,
        .write = dfsentry_trace_filter_write,
        .llseek = default_llseek,
};

static int debugfs_create_trace_filter(struct snd_sof_dev *sdev)
{
        struct snd_sof_dfsentry *dfse;

        dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
        if (!dfse)
                return -ENOMEM;

        dfse->sdev = sdev;
        dfse->type = SOF_DFSENTRY_TYPE_BUF;

        debugfs_create_file("filter", 0200, sdev->debugfs_root, dfse,
                            &sof_dfs_trace_filter_fops);

        /* add to dfsentry list */
        list_add(&dfse->list, &sdev->dfsentry_list);

        return 0;
}

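/*
 * Update the cached DSP write position; returns true only when this call
 * actually changed the stored offset.
 */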
static bool sof_dtrace_set_host_offset(struct sof_dtrace_priv *priv, u32 new_offset)
{
        u32 host_offset = READ_ONCE(priv->host_offset);

        if (host_offset != new_offset) {
                /* This is a bit paranoid and unlikely that it is needed */
                u32 ret = cmpxchg(&priv->host_offset, host_offset, new_offset);

                if (ret == host_offset)
                        return true;
        }

        return false;
}

static size_t sof_dtrace_avail(struct snd_sof_dev *sdev,
                               loff_t pos, size_t buffer_size)
{
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;
        loff_t host_offset = READ_ONCE(priv->host_offset);

        /*
         * If the host offset is less than the local pos, the write pointer of
         * the host DMA buffer has wrapped, so return the trace data at the
         * end of the buffer first.
         */
        if (host_offset < pos)
                return buffer_size - pos;

        /* If there is available trace data now, it is unnecessary to wait. */
        if (host_offset > pos)
                return host_offset - pos;

        return 0;
}

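/*
 * Like sof_dtrace_avail(), but sleep on the trace wait queue until the DSP
 * reports new data, tracing has been drained, or the reader is interrupted.
 */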
static size_t sof_wait_dtrace_avail(struct snd_sof_dev *sdev, loff_t pos,
                                    size_t buffer_size)
{
        size_t ret = sof_dtrace_avail(sdev, pos, buffer_size);
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;
        wait_queue_entry_t wait;

        /* data immediately available */
        if (ret)
                return ret;

        if (priv->dtrace_draining && !trace_pos_update_expected(priv)) {
                /*
                 * tracing has ended and all traces have been
                 * read by client, return EOF
                 */
                priv->dtrace_draining = false;
                return 0;
        }

        /* wait for available trace data from FW */
        init_waitqueue_entry(&wait, current);
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&priv->trace_sleep, &wait);

        if (!signal_pending(current)) {
                /* set timeout to max value, no error code */
                schedule_timeout(MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&priv->trace_sleep, &wait);

        return sof_dtrace_avail(sdev, pos, buffer_size);
}

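/*
 * debugfs "trace" read handler: wait for new trace data, sync the DMA buffer
 * for the CPU and copy up to @count bytes to user space.
 */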
static ssize_t dfsentry_dtrace_read(struct file *file, char __user *buffer,
                                    size_t count, loff_t *ppos)
{
        struct snd_sof_dfsentry *dfse = file->private_data;
        struct snd_sof_dev *sdev = dfse->sdev;
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;
        unsigned long rem;
        loff_t lpos = *ppos;
        size_t avail, buffer_size = dfse->size;
        u64 lpos_64;

        /* make sure we know about any failures on the DSP side */
        priv->dtrace_error = false;

        /* check pos and count */
        if (lpos < 0)
                return -EINVAL;
        if (!count)
                return 0;

        /* check for buffer wrap and count overflow */
        lpos_64 = lpos;
        lpos = do_div(lpos_64, buffer_size);

        /* get available count based on current host offset */
        avail = sof_wait_dtrace_avail(sdev, lpos, buffer_size);
        if (priv->dtrace_error) {
                dev_err(sdev->dev, "trace IO error\n");
                return -EIO;
        }

        /* no new trace data */
        if (!avail)
                return 0;

        /* make sure count is <= avail */
        if (count > avail)
                count = avail;

        /*
         * make sure that all trace data is available for the CPU as the trace
         * data buffer might be allocated from non consistent memory.
         * Note: snd_dma_buffer_sync() is called for normal audio playback and
         * capture streams also.
         */
        snd_dma_buffer_sync(&priv->dmatb, SNDRV_DMA_SYNC_CPU);
        /* copy available trace data to debugfs */
        rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
        if (rem)
                return -EFAULT;

        *ppos += count;

        /* move debugfs reading position */
        return count;
}

static int dfsentry_dtrace_release(struct inode *inode, struct file *file)
{
        struct snd_sof_dfsentry *dfse = inode->i_private;
        struct snd_sof_dev *sdev = dfse->sdev;
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;

        /* avoid duplicate traces at next open */
        if (priv->dtrace_state != SOF_DTRACE_ENABLED)
                sof_dtrace_set_host_offset(priv, 0);

        return 0;
}

static const struct file_operations sof_dfs_dtrace_fops = {
        .open = simple_open,
        .read = dfsentry_dtrace_read,
        .llseek = default_llseek,
        .release = dfsentry_dtrace_release,
};

static int debugfs_create_dtrace(struct snd_sof_dev *sdev)
{
        struct sof_dtrace_priv *priv;
        struct snd_sof_dfsentry *dfse;
        int ret;

        if (!sdev)
                return -EINVAL;

        priv = sdev->fw_trace_data;

        ret = debugfs_create_trace_filter(sdev);
        if (ret < 0)
                dev_warn(sdev->dev, "failed to create filter debugfs file: %d", ret);

        dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
        if (!dfse)
                return -ENOMEM;

        dfse->type = SOF_DFSENTRY_TYPE_BUF;
        dfse->buf = priv->dmatb.area;
        dfse->size = priv->dmatb.bytes;
        dfse->sdev = sdev;

        debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
                            &sof_dfs_dtrace_fops);

        return 0;
}

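/*
 * Configure the trace DMA buffer in the DSP (SOF_IPC_TRACE_DMA_PARAMS or
 * SOF_IPC_TRACE_DMA_PARAMS_EXT depending on the firmware ABI) and start the
 * host DMA trace stream.
 */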
static int ipc3_dtrace_enable(struct snd_sof_dev *sdev)
{
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;
        struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
        struct sof_ipc_fw_version *v = &ready->version;
        struct sof_ipc_dma_trace_params_ext params;
        struct sof_ipc_reply ipc_reply;
        int ret;

        if (!sdev->fw_trace_is_supported)
                return 0;

        if (priv->dtrace_state == SOF_DTRACE_ENABLED || !priv->dma_trace_pages)
                return -EINVAL;

        if (priv->dtrace_state == SOF_DTRACE_STOPPED)
                goto start;

        /* set IPC parameters */
        params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
        /* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
        if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
                params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
                params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
                params.timestamp_ns = ktime_get(); /* in nanoseconds */
        } else {
                params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
                params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
        }
        params.buffer.phy_addr = priv->dmatp.addr;
        params.buffer.size = priv->dmatb.bytes;
        params.buffer.pages = priv->dma_trace_pages;
        params.stream_tag = 0;

        sof_dtrace_set_host_offset(priv, 0);
        priv->dtrace_draining = false;

        ret = sof_dtrace_host_init(sdev, &priv->dmatb, &params);
        if (ret < 0) {
                dev_err(sdev->dev, "Host dtrace init failed: %d\n", ret);
                return ret;
        }
        dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

        /* send IPC to the DSP */
        priv->dtrace_state = SOF_DTRACE_INITIALIZING;
        ret = sof_ipc_tx_message(sdev->ipc, &params, sizeof(params), &ipc_reply, sizeof(ipc_reply));
        if (ret < 0) {
                dev_err(sdev->dev, "can't set params for DMA for trace %d\n", ret);
                goto trace_release;
        }

start:
        priv->dtrace_state = SOF_DTRACE_ENABLED;

        ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_START);
        if (ret < 0) {
                dev_err(sdev->dev, "Host dtrace trigger start failed: %d\n", ret);
                goto trace_release;
        }

        return 0;

trace_release:
        priv->dtrace_state = SOF_DTRACE_DISABLED;
        sof_dtrace_host_release(sdev);
        return ret;
}

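/*
 * Allocate the trace page table and data buffers, create the debugfs entries
 * on first boot, and enable tracing.
 */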
static int ipc3_dtrace_init(struct snd_sof_dev *sdev)
{
        struct sof_dtrace_priv *priv;
        int ret;

        /* dtrace is only supported with SOF_IPC */
        if (sdev->pdata->ipc_type != SOF_IPC)
                return -EOPNOTSUPP;

        if (sdev->fw_trace_data) {
                dev_err(sdev->dev, "fw_trace_data has been already allocated\n");
                return -EBUSY;
        }

        priv = devm_kzalloc(sdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        sdev->fw_trace_data = priv;

        /* mark disabled before starting initialization */
        priv->dtrace_state = SOF_DTRACE_DISABLED;

        /* allocate trace page table buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
                                  PAGE_SIZE, &priv->dmatp);
        if (ret < 0) {
                dev_err(sdev->dev, "can't alloc page table for trace %d\n", ret);
                return ret;
        }

        /* allocate trace data buffer */
        ret = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
                                      DMA_FROM_DEVICE, DMA_BUF_SIZE_FOR_TRACE,
                                      &priv->dmatb);
        if (ret < 0) {
                dev_err(sdev->dev, "can't alloc buffer for trace %d\n", ret);
                goto page_err;
        }

        /* create compressed page table for audio firmware */
        ret = snd_sof_create_page_table(sdev->dev, &priv->dmatb,
                                        priv->dmatp.area, priv->dmatb.bytes);
        if (ret < 0)
                goto table_err;

        priv->dma_trace_pages = ret;
        dev_dbg(sdev->dev, "dma_trace_pages: %d\n", priv->dma_trace_pages);

        if (sdev->first_boot) {
                ret = debugfs_create_dtrace(sdev);
                if (ret < 0)
                        goto table_err;
        }

        init_waitqueue_head(&priv->trace_sleep);

        ret = ipc3_dtrace_enable(sdev);
        if (ret < 0)
                goto table_err;

        return 0;

table_err:
        priv->dma_trace_pages = 0;
        snd_dma_free_pages(&priv->dmatb);
page_err:
        snd_dma_free_pages(&priv->dmatp);
        return ret;
}

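/*
 * Handle a trace position update IPC from the DSP: record the new host
 * offset, wake any waiting reader and report buffer overflows.
 */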
int ipc3_dtrace_posn_update(struct snd_sof_dev *sdev,
                            struct sof_ipc_dma_trace_posn *posn)
{
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;

        if (!sdev->fw_trace_is_supported)
                return 0;

        if (trace_pos_update_expected(priv) &&
            sof_dtrace_set_host_offset(priv, posn->host_offset))
                wake_up(&priv->trace_sleep);

        if (posn->overflow != 0)
                dev_err(sdev->dev,
                        "DSP trace buffer overflow %u bytes. Total messages %d\n",
                        posn->overflow, posn->messages);

        return 0;
}

/* an error has occurred within the DSP that prevents further trace */
static void ipc3_dtrace_fw_crashed(struct snd_sof_dev *sdev)
{
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;

        if (priv->dtrace_state == SOF_DTRACE_ENABLED) {
                priv->dtrace_error = true;
                wake_up(&priv->trace_sleep);
        }
}

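/*
 * Stop the host DMA trace stream, free the trace DMA in the DSP (ABI 3.20.0
 * onwards) and, unless only_stop is set, release the host-side DMA resources;
 * wake the reader so it can drain any remaining data.
 */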
static void ipc3_dtrace_release(struct snd_sof_dev *sdev, bool only_stop)
{
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;
        struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
        struct sof_ipc_fw_version *v = &ready->version;
        struct sof_ipc_cmd_hdr hdr;
        struct sof_ipc_reply ipc_reply;
        int ret;

        if (!sdev->fw_trace_is_supported || priv->dtrace_state == SOF_DTRACE_DISABLED)
                return;

        ret = sof_dtrace_host_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
        if (ret < 0)
                dev_err(sdev->dev, "Host dtrace trigger stop failed: %d\n", ret);
        priv->dtrace_state = SOF_DTRACE_STOPPED;

        /*
         * stop and free trace DMA in the DSP. TRACE_DMA_FREE is only supported from
         * ABI 3.20.0 onwards
         */
        if (v->abi_version >= SOF_ABI_VER(3, 20, 0)) {
                hdr.size = sizeof(hdr);
                hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_DMA_FREE;

                ret = sof_ipc_tx_message(sdev->ipc, &hdr, hdr.size,
                                         &ipc_reply, sizeof(ipc_reply));
                if (ret < 0)
                        dev_err(sdev->dev, "DMA_TRACE_FREE failed with error: %d\n", ret);
        }

        if (only_stop)
                goto out;

        ret = sof_dtrace_host_release(sdev);
        if (ret < 0)
                dev_err(sdev->dev, "Host dtrace release failed %d\n", ret);

        priv->dtrace_state = SOF_DTRACE_DISABLED;

out:
        priv->dtrace_draining = true;
        wake_up(&priv->trace_sleep);
}

static void ipc3_dtrace_suspend(struct snd_sof_dev *sdev, pm_message_t pm_state)
{
        ipc3_dtrace_release(sdev, pm_state.event == SOF_DSP_PM_D0);
}

static int ipc3_dtrace_resume(struct snd_sof_dev *sdev)
{
        return ipc3_dtrace_enable(sdev);
}

static void ipc3_dtrace_free(struct snd_sof_dev *sdev)
{
        struct sof_dtrace_priv *priv = sdev->fw_trace_data;

        /* release trace */
        ipc3_dtrace_release(sdev, false);

        if (priv->dma_trace_pages) {
                snd_dma_free_pages(&priv->dmatb);
                snd_dma_free_pages(&priv->dmatp);
                priv->dma_trace_pages = 0;
        }
}

const struct sof_ipc_fw_tracing_ops ipc3_dtrace_ops = {
        .init = ipc3_dtrace_init,
        .free = ipc3_dtrace_free,
        .fw_crashed = ipc3_dtrace_fw_crashed,
        .suspend = ipc3_dtrace_suspend,
        .resume = ipc3_dtrace_resume,
};