hda-stream.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099
  1. // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
  2. //
  3. // This file is provided under a dual BSD/GPLv2 license. When using or
  4. // redistributing this file, you may do so under either license.
  5. //
  6. // Copyright(c) 2018 Intel Corporation. All rights reserved.
  7. //
  8. // Authors: Liam Girdwood <[email protected]>
  9. // Ranjani Sridharan <[email protected]>
  10. // Rander Wang <[email protected]>
  11. // Keyon Jie <[email protected]>
  12. //
  13. /*
  14. * Hardware interface for generic Intel audio DSP HDA IP
  15. */
  16. #include <linux/pm_runtime.h>
  17. #include <sound/hdaudio_ext.h>
  18. #include <sound/hda_register.h>
  19. #include <sound/sof.h>
  20. #include <trace/events/sof_intel.h>
  21. #include "../ops.h"
  22. #include "../sof-audio.h"
  23. #include "hda.h"
  24. #define HDA_LTRP_GB_VALUE_US 95
  25. static inline const char *hda_hstream_direction_str(struct hdac_stream *hstream)
  26. {
  27. if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
  28. return "Playback";
  29. else
  30. return "Capture";
  31. }
  32. static char *hda_hstream_dbg_get_stream_info_str(struct hdac_stream *hstream)
  33. {
  34. struct snd_soc_pcm_runtime *rtd;
  35. if (hstream->substream)
  36. rtd = asoc_substream_to_rtd(hstream->substream);
  37. else if (hstream->cstream)
  38. rtd = hstream->cstream->private_data;
  39. else
  40. /* Non audio DMA user, like dma-trace */
  41. return kasprintf(GFP_KERNEL, "-- (%s, stream_tag: %u)",
  42. hda_hstream_direction_str(hstream),
  43. hstream->stream_tag);
  44. return kasprintf(GFP_KERNEL, "dai_link \"%s\" (%s, stream_tag: %u)",
  45. rtd->dai_link->name, hda_hstream_direction_str(hstream),
  46. hstream->stream_tag);
  47. }
  48. /*
  49. * set up one of BDL entries for a stream
  50. */
  51. static int hda_setup_bdle(struct snd_sof_dev *sdev,
  52. struct snd_dma_buffer *dmab,
  53. struct hdac_stream *hstream,
  54. struct sof_intel_dsp_bdl **bdlp,
  55. int offset, int size, int ioc)
  56. {
  57. struct hdac_bus *bus = sof_to_bus(sdev);
  58. struct sof_intel_dsp_bdl *bdl = *bdlp;
  59. while (size > 0) {
  60. dma_addr_t addr;
  61. int chunk;
  62. if (hstream->frags >= HDA_DSP_MAX_BDL_ENTRIES) {
  63. dev_err(sdev->dev, "error: stream frags exceeded\n");
  64. return -EINVAL;
  65. }
  66. addr = snd_sgbuf_get_addr(dmab, offset);
  67. /* program BDL addr */
  68. bdl->addr_l = cpu_to_le32(lower_32_bits(addr));
  69. bdl->addr_h = cpu_to_le32(upper_32_bits(addr));
  70. /* program BDL size */
  71. chunk = snd_sgbuf_get_chunk_size(dmab, offset, size);
  72. /* one BDLE should not cross 4K boundary */
  73. if (bus->align_bdle_4k) {
  74. u32 remain = 0x1000 - (offset & 0xfff);
  75. if (chunk > remain)
  76. chunk = remain;
  77. }
  78. bdl->size = cpu_to_le32(chunk);
  79. /* only program IOC when the whole segment is processed */
  80. size -= chunk;
  81. bdl->ioc = (size || !ioc) ? 0 : cpu_to_le32(0x01);
  82. bdl++;
  83. hstream->frags++;
  84. offset += chunk;
  85. }
  86. *bdlp = bdl;
  87. return offset;
  88. }
  89. /*
  90. * set up Buffer Descriptor List (BDL) for host memory transfer
  91. * BDL describes the location of the individual buffers and is little endian.
  92. */
  93. int hda_dsp_stream_setup_bdl(struct snd_sof_dev *sdev,
  94. struct snd_dma_buffer *dmab,
  95. struct hdac_stream *hstream)
  96. {
  97. struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
  98. struct sof_intel_dsp_bdl *bdl;
  99. int i, offset, period_bytes, periods;
  100. int remain, ioc;
  101. period_bytes = hstream->period_bytes;
  102. dev_dbg(sdev->dev, "period_bytes:0x%x\n", period_bytes);
  103. if (!period_bytes)
  104. period_bytes = hstream->bufsize;
  105. periods = hstream->bufsize / period_bytes;
  106. dev_dbg(sdev->dev, "periods:%d\n", periods);
  107. remain = hstream->bufsize % period_bytes;
  108. if (remain)
  109. periods++;
  110. /* program the initial BDL entries */
  111. bdl = (struct sof_intel_dsp_bdl *)hstream->bdl.area;
  112. offset = 0;
  113. hstream->frags = 0;
  114. /*
  115. * set IOC if don't use position IPC
  116. * and period_wakeup needed.
  117. */
  118. ioc = hda->no_ipc_position ?
  119. !hstream->no_period_wakeup : 0;
  120. for (i = 0; i < periods; i++) {
  121. if (i == (periods - 1) && remain)
  122. /* set the last small entry */
  123. offset = hda_setup_bdle(sdev, dmab,
  124. hstream, &bdl, offset,
  125. remain, 0);
  126. else
  127. offset = hda_setup_bdle(sdev, dmab,
  128. hstream, &bdl, offset,
  129. period_bytes, ioc);
  130. }
  131. return offset;
  132. }
  133. int hda_dsp_stream_spib_config(struct snd_sof_dev *sdev,
  134. struct hdac_ext_stream *hext_stream,
  135. int enable, u32 size)
  136. {
  137. struct hdac_stream *hstream = &hext_stream->hstream;
  138. u32 mask;
  139. if (!sdev->bar[HDA_DSP_SPIB_BAR]) {
  140. dev_err(sdev->dev, "error: address of spib capability is NULL\n");
  141. return -EINVAL;
  142. }
  143. mask = (1 << hstream->index);
  144. /* enable/disable SPIB for the stream */
  145. snd_sof_dsp_update_bits(sdev, HDA_DSP_SPIB_BAR,
  146. SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL, mask,
  147. enable << hstream->index);
  148. /* set the SPIB value */
  149. sof_io_write(sdev, hext_stream->spib_addr, size);
  150. return 0;
  151. }
  152. /* get next unused stream */
  153. struct hdac_ext_stream *
  154. hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction, u32 flags)
  155. {
  156. struct hdac_bus *bus = sof_to_bus(sdev);
  157. struct sof_intel_hda_stream *hda_stream;
  158. struct hdac_ext_stream *hext_stream = NULL;
  159. struct hdac_stream *s;
  160. spin_lock_irq(&bus->reg_lock);
  161. /* get an unused stream */
  162. list_for_each_entry(s, &bus->stream_list, list) {
  163. if (s->direction == direction && !s->opened) {
  164. hext_stream = stream_to_hdac_ext_stream(s);
  165. hda_stream = container_of(hext_stream,
  166. struct sof_intel_hda_stream,
  167. hext_stream);
  168. /* check if the host DMA channel is reserved */
  169. if (hda_stream->host_reserved)
  170. continue;
  171. s->opened = true;
  172. break;
  173. }
  174. }
  175. spin_unlock_irq(&bus->reg_lock);
  176. /* stream found ? */
  177. if (!hext_stream) {
  178. dev_err(sdev->dev, "error: no free %s streams\n",
  179. direction == SNDRV_PCM_STREAM_PLAYBACK ?
  180. "playback" : "capture");
  181. return hext_stream;
  182. }
  183. hda_stream->flags = flags;
  184. /*
  185. * Prevent DMI Link L1 entry for streams that don't support it.
  186. * Workaround to address a known issue with host DMA that results
  187. * in xruns during pause/release in capture scenarios.
  188. */
  189. if (!(flags & SOF_HDA_STREAM_DMI_L1_COMPATIBLE))
  190. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  191. HDA_VS_INTEL_EM2,
  192. HDA_VS_INTEL_EM2_L1SEN, 0);
  193. return hext_stream;
  194. }
  195. /* free a stream */
  196. int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
  197. {
  198. struct hdac_bus *bus = sof_to_bus(sdev);
  199. struct sof_intel_hda_stream *hda_stream;
  200. struct hdac_ext_stream *hext_stream;
  201. struct hdac_stream *s;
  202. bool dmi_l1_enable = true;
  203. bool found = false;
  204. spin_lock_irq(&bus->reg_lock);
  205. /*
  206. * close stream matching the stream tag and check if there are any open streams
  207. * that are DMI L1 incompatible.
  208. */
  209. list_for_each_entry(s, &bus->stream_list, list) {
  210. hext_stream = stream_to_hdac_ext_stream(s);
  211. hda_stream = container_of(hext_stream, struct sof_intel_hda_stream, hext_stream);
  212. if (!s->opened)
  213. continue;
  214. if (s->direction == direction && s->stream_tag == stream_tag) {
  215. s->opened = false;
  216. found = true;
  217. } else if (!(hda_stream->flags & SOF_HDA_STREAM_DMI_L1_COMPATIBLE)) {
  218. dmi_l1_enable = false;
  219. }
  220. }
  221. spin_unlock_irq(&bus->reg_lock);
  222. /* Enable DMI L1 if permitted */
  223. if (dmi_l1_enable)
  224. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
  225. HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);
  226. if (!found) {
  227. dev_err(sdev->dev, "%s: stream_tag %d not opened!\n",
  228. __func__, stream_tag);
  229. return -ENODEV;
  230. }
  231. return 0;
  232. }
  233. static int hda_dsp_stream_reset(struct snd_sof_dev *sdev, struct hdac_stream *hstream)
  234. {
  235. int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
  236. int timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
  237. u32 val;
  238. /* enter stream reset */
  239. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, SOF_STREAM_SD_OFFSET_CRST,
  240. SOF_STREAM_SD_OFFSET_CRST);
  241. do {
  242. val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, sd_offset);
  243. if (val & SOF_STREAM_SD_OFFSET_CRST)
  244. break;
  245. } while (--timeout);
  246. if (timeout == 0) {
  247. dev_err(sdev->dev, "timeout waiting for stream reset\n");
  248. return -ETIMEDOUT;
  249. }
  250. timeout = HDA_DSP_STREAM_RESET_TIMEOUT;
  251. /* exit stream reset and wait to read a zero before reading any other register */
  252. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset, SOF_STREAM_SD_OFFSET_CRST, 0x0);
  253. /* wait for hardware to report that stream is out of reset */
  254. udelay(3);
  255. do {
  256. val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, sd_offset);
  257. if ((val & SOF_STREAM_SD_OFFSET_CRST) == 0)
  258. break;
  259. } while (--timeout);
  260. if (timeout == 0) {
  261. dev_err(sdev->dev, "timeout waiting for stream to exit reset\n");
  262. return -ETIMEDOUT;
  263. }
  264. return 0;
  265. }
  266. int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
  267. struct hdac_ext_stream *hext_stream, int cmd)
  268. {
  269. struct hdac_stream *hstream = &hext_stream->hstream;
  270. int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
  271. u32 dma_start = SOF_HDA_SD_CTL_DMA_START;
  272. int ret = 0;
  273. u32 run;
  274. /* cmd must be for audio stream */
  275. switch (cmd) {
  276. case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
  277. case SNDRV_PCM_TRIGGER_START:
  278. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
  279. 1 << hstream->index,
  280. 1 << hstream->index);
  281. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  282. sd_offset,
  283. SOF_HDA_SD_CTL_DMA_START |
  284. SOF_HDA_CL_DMA_SD_INT_MASK,
  285. SOF_HDA_SD_CTL_DMA_START |
  286. SOF_HDA_CL_DMA_SD_INT_MASK);
  287. ret = snd_sof_dsp_read_poll_timeout(sdev,
  288. HDA_DSP_HDA_BAR,
  289. sd_offset, run,
  290. ((run & dma_start) == dma_start),
  291. HDA_DSP_REG_POLL_INTERVAL_US,
  292. HDA_DSP_STREAM_RUN_TIMEOUT);
  293. if (ret >= 0)
  294. hstream->running = true;
  295. break;
  296. case SNDRV_PCM_TRIGGER_SUSPEND:
  297. case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
  298. case SNDRV_PCM_TRIGGER_STOP:
  299. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  300. sd_offset,
  301. SOF_HDA_SD_CTL_DMA_START |
  302. SOF_HDA_CL_DMA_SD_INT_MASK, 0x0);
  303. ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
  304. sd_offset, run,
  305. !(run & dma_start),
  306. HDA_DSP_REG_POLL_INTERVAL_US,
  307. HDA_DSP_STREAM_RUN_TIMEOUT);
  308. if (ret >= 0) {
  309. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  310. sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
  311. SOF_HDA_CL_DMA_SD_INT_MASK);
  312. hstream->running = false;
  313. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  314. SOF_HDA_INTCTL,
  315. 1 << hstream->index, 0x0);
  316. }
  317. break;
  318. default:
  319. dev_err(sdev->dev, "error: unknown command: %d\n", cmd);
  320. return -EINVAL;
  321. }
  322. if (ret < 0) {
  323. char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
  324. dev_err(sdev->dev,
  325. "%s: cmd %d on %s: timeout on STREAM_SD_OFFSET read\n",
  326. __func__, cmd, stream_name ? stream_name : "unknown stream");
  327. kfree(stream_name);
  328. }
  329. return ret;
  330. }
  331. /* minimal recommended programming for ICCMAX stream */
  332. int hda_dsp_iccmax_stream_hw_params(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream,
  333. struct snd_dma_buffer *dmab,
  334. struct snd_pcm_hw_params *params)
  335. {
  336. struct hdac_bus *bus = sof_to_bus(sdev);
  337. struct hdac_stream *hstream = &hext_stream->hstream;
  338. int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
  339. int ret;
  340. u32 mask = 0x1 << hstream->index;
  341. if (!hext_stream) {
  342. dev_err(sdev->dev, "error: no stream available\n");
  343. return -ENODEV;
  344. }
  345. if (!dmab) {
  346. dev_err(sdev->dev, "error: no dma buffer allocated!\n");
  347. return -ENODEV;
  348. }
  349. if (hstream->posbuf)
  350. *hstream->posbuf = 0;
  351. /* reset BDL address */
  352. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  353. sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
  354. 0x0);
  355. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  356. sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
  357. 0x0);
  358. hstream->frags = 0;
  359. ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
  360. if (ret < 0) {
  361. dev_err(sdev->dev, "error: set up of BDL failed\n");
  362. return ret;
  363. }
  364. /* program BDL address */
  365. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  366. sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
  367. (u32)hstream->bdl.addr);
  368. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  369. sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
  370. upper_32_bits(hstream->bdl.addr));
  371. /* program cyclic buffer length */
  372. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  373. sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL,
  374. hstream->bufsize);
  375. /* program last valid index */
  376. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  377. sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI,
  378. 0xffff, (hstream->frags - 1));
  379. /* decouple host and link DMA, enable DSP features */
  380. snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
  381. mask, mask);
  382. /* Follow HW recommendation to set the guardband value to 95us during FW boot */
  383. snd_hdac_chip_updateb(bus, VS_LTRP, HDA_VS_INTEL_LTRP_GB_MASK, HDA_LTRP_GB_VALUE_US);
  384. /* start DMA */
  385. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
  386. SOF_HDA_SD_CTL_DMA_START, SOF_HDA_SD_CTL_DMA_START);
  387. return 0;
  388. }
  389. /*
  390. * prepare for common hdac registers settings, for both code loader
  391. * and normal stream.
  392. */
  393. int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
  394. struct hdac_ext_stream *hext_stream,
  395. struct snd_dma_buffer *dmab,
  396. struct snd_pcm_hw_params *params)
  397. {
  398. const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
  399. struct hdac_bus *bus = sof_to_bus(sdev);
  400. struct hdac_stream *hstream = &hext_stream->hstream;
  401. int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
  402. int ret;
  403. u32 dma_start = SOF_HDA_SD_CTL_DMA_START;
  404. u32 mask;
  405. u32 run;
  406. if (!hext_stream) {
  407. dev_err(sdev->dev, "error: no stream available\n");
  408. return -ENODEV;
  409. }
  410. if (!dmab) {
  411. dev_err(sdev->dev, "error: no dma buffer allocated!\n");
  412. return -ENODEV;
  413. }
  414. /* decouple host and link DMA */
  415. mask = 0x1 << hstream->index;
  416. snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
  417. mask, mask);
  418. /* clear stream status */
  419. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
  420. SOF_HDA_CL_DMA_SD_INT_MASK |
  421. SOF_HDA_SD_CTL_DMA_START, 0);
  422. ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
  423. sd_offset, run,
  424. !(run & dma_start),
  425. HDA_DSP_REG_POLL_INTERVAL_US,
  426. HDA_DSP_STREAM_RUN_TIMEOUT);
  427. if (ret < 0) {
  428. char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
  429. dev_err(sdev->dev,
  430. "%s: on %s: timeout on STREAM_SD_OFFSET read1\n",
  431. __func__, stream_name ? stream_name : "unknown stream");
  432. kfree(stream_name);
  433. return ret;
  434. }
  435. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  436. sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
  437. SOF_HDA_CL_DMA_SD_INT_MASK,
  438. SOF_HDA_CL_DMA_SD_INT_MASK);
  439. /* stream reset */
  440. ret = hda_dsp_stream_reset(sdev, hstream);
  441. if (ret < 0)
  442. return ret;
  443. if (hstream->posbuf)
  444. *hstream->posbuf = 0;
  445. /* reset BDL address */
  446. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  447. sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
  448. 0x0);
  449. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  450. sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
  451. 0x0);
  452. /* clear stream status */
  453. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
  454. SOF_HDA_CL_DMA_SD_INT_MASK |
  455. SOF_HDA_SD_CTL_DMA_START, 0);
  456. ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
  457. sd_offset, run,
  458. !(run & dma_start),
  459. HDA_DSP_REG_POLL_INTERVAL_US,
  460. HDA_DSP_STREAM_RUN_TIMEOUT);
  461. if (ret < 0) {
  462. char *stream_name = hda_hstream_dbg_get_stream_info_str(hstream);
  463. dev_err(sdev->dev,
  464. "%s: on %s: timeout on STREAM_SD_OFFSET read1\n",
  465. __func__, stream_name ? stream_name : "unknown stream");
  466. kfree(stream_name);
  467. return ret;
  468. }
  469. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  470. sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
  471. SOF_HDA_CL_DMA_SD_INT_MASK,
  472. SOF_HDA_CL_DMA_SD_INT_MASK);
  473. hstream->frags = 0;
  474. ret = hda_dsp_stream_setup_bdl(sdev, dmab, hstream);
  475. if (ret < 0) {
  476. dev_err(sdev->dev, "error: set up of BDL failed\n");
  477. return ret;
  478. }
  479. /* program stream tag to set up stream descriptor for DMA */
  480. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
  481. SOF_HDA_CL_SD_CTL_STREAM_TAG_MASK,
  482. hstream->stream_tag <<
  483. SOF_HDA_CL_SD_CTL_STREAM_TAG_SHIFT);
  484. /* program cyclic buffer length */
  485. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  486. sd_offset + SOF_HDA_ADSP_REG_CL_SD_CBL,
  487. hstream->bufsize);
  488. /*
  489. * Recommended hardware programming sequence for HDAudio DMA format
  490. * on earlier platforms - this is not needed on newer platforms
  491. *
  492. * 1. Put DMA into coupled mode by clearing PPCTL.PROCEN bit
  493. * for corresponding stream index before the time of writing
  494. * format to SDxFMT register.
  495. * 2. Write SDxFMT
  496. * 3. Set PPCTL.PROCEN bit for corresponding stream index to
  497. * enable decoupled mode
  498. */
  499. if (chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK) {
  500. /* couple host and link DMA, disable DSP features */
  501. snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
  502. mask, 0);
  503. }
  504. /* program stream format */
  505. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  506. sd_offset +
  507. SOF_HDA_ADSP_REG_CL_SD_FORMAT,
  508. 0xffff, hstream->format_val);
  509. if (chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK) {
  510. /* decouple host and link DMA, enable DSP features */
  511. snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
  512. mask, mask);
  513. }
  514. /* program last valid index */
  515. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
  516. sd_offset + SOF_HDA_ADSP_REG_CL_SD_LVI,
  517. 0xffff, (hstream->frags - 1));
  518. /* program BDL address */
  519. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  520. sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPL,
  521. (u32)hstream->bdl.addr);
  522. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
  523. sd_offset + SOF_HDA_ADSP_REG_CL_SD_BDLPU,
  524. upper_32_bits(hstream->bdl.addr));
  525. /* enable position buffer, if needed */
  526. if (bus->use_posbuf && bus->posbuf.addr &&
  527. !(snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE)
  528. & SOF_HDA_ADSP_DPLBASE_ENABLE)) {
  529. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
  530. upper_32_bits(bus->posbuf.addr));
  531. snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
  532. (u32)bus->posbuf.addr |
  533. SOF_HDA_ADSP_DPLBASE_ENABLE);
  534. }
  535. /* set interrupt enable bits */
  536. snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
  537. SOF_HDA_CL_DMA_SD_INT_MASK,
  538. SOF_HDA_CL_DMA_SD_INT_MASK);
  539. /* read FIFO size */
  540. if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK) {
  541. hstream->fifo_size =
  542. snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
  543. sd_offset +
  544. SOF_HDA_ADSP_REG_CL_SD_FIFOSIZE);
  545. hstream->fifo_size &= 0xffff;
  546. hstream->fifo_size += 1;
  547. } else {
  548. hstream->fifo_size = 0;
  549. }
  550. return ret;
  551. }
  552. int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
  553. struct snd_pcm_substream *substream)
  554. {
  555. struct hdac_stream *hstream = substream->runtime->private_data;
  556. struct hdac_ext_stream *hext_stream = container_of(hstream,
  557. struct hdac_ext_stream,
  558. hstream);
  559. struct hdac_bus *bus = sof_to_bus(sdev);
  560. u32 mask = 0x1 << hstream->index;
  561. int ret;
  562. ret = hda_dsp_stream_reset(sdev, hstream);
  563. if (ret < 0)
  564. return ret;
  565. spin_lock_irq(&bus->reg_lock);
  566. /* couple host and link DMA if link DMA channel is idle */
  567. if (!hext_stream->link_locked)
  568. snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
  569. SOF_HDA_REG_PP_PPCTL, mask, 0);
  570. spin_unlock_irq(&bus->reg_lock);
  571. hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
  572. hstream->substream = NULL;
  573. return 0;
  574. }
  575. bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev)
  576. {
  577. struct hdac_bus *bus = sof_to_bus(sdev);
  578. bool ret = false;
  579. u32 status;
  580. /* The function can be called at irq thread, so use spin_lock_irq */
  581. spin_lock_irq(&bus->reg_lock);
  582. status = snd_hdac_chip_readl(bus, INTSTS);
  583. trace_sof_intel_hda_dsp_check_stream_irq(sdev, status);
  584. /* if Register inaccessible, ignore it.*/
  585. if (status != 0xffffffff)
  586. ret = true;
  587. spin_unlock_irq(&bus->reg_lock);
  588. return ret;
  589. }
  590. static void
  591. hda_dsp_compr_bytes_transferred(struct hdac_stream *hstream, int direction)
  592. {
  593. u64 buffer_size = hstream->bufsize;
  594. u64 prev_pos, pos, num_bytes;
  595. div64_u64_rem(hstream->curr_pos, buffer_size, &prev_pos);
  596. pos = hda_dsp_stream_get_position(hstream, direction, false);
  597. if (pos < prev_pos)
  598. num_bytes = (buffer_size - prev_pos) + pos;
  599. else
  600. num_bytes = pos - prev_pos;
  601. hstream->curr_pos += num_bytes;
  602. }
  603. static bool hda_dsp_stream_check(struct hdac_bus *bus, u32 status)
  604. {
  605. struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
  606. struct hdac_stream *s;
  607. bool active = false;
  608. u32 sd_status;
  609. list_for_each_entry(s, &bus->stream_list, list) {
  610. if (status & BIT(s->index) && s->opened) {
  611. sd_status = snd_hdac_stream_readb(s, SD_STS);
  612. trace_sof_intel_hda_dsp_stream_status(bus->dev, s, sd_status);
  613. snd_hdac_stream_writeb(s, SD_STS, sd_status);
  614. active = true;
  615. if ((!s->substream && !s->cstream) ||
  616. !s->running ||
  617. (sd_status & SOF_HDA_CL_DMA_SD_INT_COMPLETE) == 0)
  618. continue;
  619. /* Inform ALSA only in case not do that with IPC */
  620. if (s->substream && sof_hda->no_ipc_position) {
  621. snd_sof_pcm_period_elapsed(s->substream);
  622. } else if (s->cstream) {
  623. hda_dsp_compr_bytes_transferred(s, s->cstream->direction);
  624. snd_compr_fragment_elapsed(s->cstream);
  625. }
  626. }
  627. }
  628. return active;
  629. }
  630. irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
  631. {
  632. struct snd_sof_dev *sdev = context;
  633. struct hdac_bus *bus = sof_to_bus(sdev);
  634. #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
  635. u32 rirb_status;
  636. #endif
  637. bool active;
  638. u32 status;
  639. int i;
  640. /*
  641. * Loop 10 times to handle missed interrupts caused by
  642. * unsolicited responses from the codec
  643. */
  644. for (i = 0, active = true; i < 10 && active; i++) {
  645. spin_lock_irq(&bus->reg_lock);
  646. status = snd_hdac_chip_readl(bus, INTSTS);
  647. /* check streams */
  648. active = hda_dsp_stream_check(bus, status);
  649. /* check and clear RIRB interrupt */
  650. #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
  651. if (status & AZX_INT_CTRL_EN) {
  652. rirb_status = snd_hdac_chip_readb(bus, RIRBSTS);
  653. if (rirb_status & RIRB_INT_MASK) {
  654. /*
  655. * Clearing the interrupt status here ensures
  656. * that no interrupt gets masked after the RIRB
  657. * wp is read in snd_hdac_bus_update_rirb.
  658. */
  659. snd_hdac_chip_writeb(bus, RIRBSTS,
  660. RIRB_INT_MASK);
  661. active = true;
  662. if (rirb_status & RIRB_INT_RESPONSE)
  663. snd_hdac_bus_update_rirb(bus);
  664. }
  665. }
  666. #endif
  667. spin_unlock_irq(&bus->reg_lock);
  668. }
  669. return IRQ_HANDLED;
  670. }
  671. int hda_dsp_stream_init(struct snd_sof_dev *sdev)
  672. {
  673. struct hdac_bus *bus = sof_to_bus(sdev);
  674. struct hdac_ext_stream *hext_stream;
  675. struct hdac_stream *hstream;
  676. struct pci_dev *pci = to_pci_dev(sdev->dev);
  677. struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
  678. int sd_offset;
  679. int i, num_playback, num_capture, num_total, ret;
  680. u32 gcap;
  681. gcap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCAP);
  682. dev_dbg(sdev->dev, "hda global caps = 0x%x\n", gcap);
  683. /* get stream count from GCAP */
  684. num_capture = (gcap >> 8) & 0x0f;
  685. num_playback = (gcap >> 12) & 0x0f;
  686. num_total = num_playback + num_capture;
  687. dev_dbg(sdev->dev, "detected %d playback and %d capture streams\n",
  688. num_playback, num_capture);
  689. if (num_playback >= SOF_HDA_PLAYBACK_STREAMS) {
  690. dev_err(sdev->dev, "error: too many playback streams %d\n",
  691. num_playback);
  692. return -EINVAL;
  693. }
  694. if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
  695. dev_err(sdev->dev, "error: too many capture streams %d\n",
  696. num_playback);
  697. return -EINVAL;
  698. }
  699. /*
  700. * mem alloc for the position buffer
  701. * TODO: check position buffer update
  702. */
  703. ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
  704. SOF_HDA_DPIB_ENTRY_SIZE * num_total,
  705. &bus->posbuf);
  706. if (ret < 0) {
  707. dev_err(sdev->dev, "error: posbuffer dma alloc failed\n");
  708. return -ENOMEM;
  709. }
  710. #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
  711. /* mem alloc for the CORB/RIRB ringbuffers */
  712. ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
  713. PAGE_SIZE, &bus->rb);
  714. if (ret < 0) {
  715. dev_err(sdev->dev, "error: RB alloc failed\n");
  716. return -ENOMEM;
  717. }
  718. #endif
  719. /* create capture streams */
  720. for (i = 0; i < num_capture; i++) {
  721. struct sof_intel_hda_stream *hda_stream;
  722. hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
  723. GFP_KERNEL);
  724. if (!hda_stream)
  725. return -ENOMEM;
  726. hda_stream->sdev = sdev;
  727. hext_stream = &hda_stream->hext_stream;
  728. hext_stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
  729. SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
  730. hext_stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
  731. SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
  732. SOF_HDA_PPLC_INTERVAL * i;
  733. /* do we support SPIB */
  734. if (sdev->bar[HDA_DSP_SPIB_BAR]) {
  735. hext_stream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
  736. SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
  737. SOF_HDA_SPIB_SPIB;
  738. hext_stream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
  739. SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
  740. SOF_HDA_SPIB_MAXFIFO;
  741. }
  742. hstream = &hext_stream->hstream;
  743. hstream->bus = bus;
  744. hstream->sd_int_sta_mask = 1 << i;
  745. hstream->index = i;
  746. sd_offset = SOF_STREAM_SD_OFFSET(hstream);
  747. hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
  748. hstream->stream_tag = i + 1;
  749. hstream->opened = false;
  750. hstream->running = false;
  751. hstream->direction = SNDRV_PCM_STREAM_CAPTURE;
  752. /* memory alloc for stream BDL */
  753. ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
  754. HDA_DSP_BDL_SIZE, &hstream->bdl);
  755. if (ret < 0) {
  756. dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
  757. return -ENOMEM;
  758. }
  759. hstream->posbuf = (__le32 *)(bus->posbuf.area +
  760. (hstream->index) * 8);
  761. list_add_tail(&hstream->list, &bus->stream_list);
  762. }
  763. /* create playback streams */
  764. for (i = num_capture; i < num_total; i++) {
  765. struct sof_intel_hda_stream *hda_stream;
  766. hda_stream = devm_kzalloc(sdev->dev, sizeof(*hda_stream),
  767. GFP_KERNEL);
  768. if (!hda_stream)
  769. return -ENOMEM;
  770. hda_stream->sdev = sdev;
  771. hext_stream = &hda_stream->hext_stream;
  772. /* we always have DSP support */
  773. hext_stream->pphc_addr = sdev->bar[HDA_DSP_PP_BAR] +
  774. SOF_HDA_PPHC_BASE + SOF_HDA_PPHC_INTERVAL * i;
  775. hext_stream->pplc_addr = sdev->bar[HDA_DSP_PP_BAR] +
  776. SOF_HDA_PPLC_BASE + SOF_HDA_PPLC_MULTI * num_total +
  777. SOF_HDA_PPLC_INTERVAL * i;
  778. /* do we support SPIB */
  779. if (sdev->bar[HDA_DSP_SPIB_BAR]) {
  780. hext_stream->spib_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
  781. SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
  782. SOF_HDA_SPIB_SPIB;
  783. hext_stream->fifo_addr = sdev->bar[HDA_DSP_SPIB_BAR] +
  784. SOF_HDA_SPIB_BASE + SOF_HDA_SPIB_INTERVAL * i +
  785. SOF_HDA_SPIB_MAXFIFO;
  786. }
  787. hstream = &hext_stream->hstream;
  788. hstream->bus = bus;
  789. hstream->sd_int_sta_mask = 1 << i;
  790. hstream->index = i;
  791. sd_offset = SOF_STREAM_SD_OFFSET(hstream);
  792. hstream->sd_addr = sdev->bar[HDA_DSP_HDA_BAR] + sd_offset;
  793. hstream->stream_tag = i - num_capture + 1;
  794. hstream->opened = false;
  795. hstream->running = false;
  796. hstream->direction = SNDRV_PCM_STREAM_PLAYBACK;
  797. /* mem alloc for stream BDL */
  798. ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
  799. HDA_DSP_BDL_SIZE, &hstream->bdl);
  800. if (ret < 0) {
  801. dev_err(sdev->dev, "error: stream bdl dma alloc failed\n");
  802. return -ENOMEM;
  803. }
  804. hstream->posbuf = (__le32 *)(bus->posbuf.area +
  805. (hstream->index) * 8);
  806. list_add_tail(&hstream->list, &bus->stream_list);
  807. }
  808. /* store total stream count (playback + capture) from GCAP */
  809. sof_hda->stream_max = num_total;
  810. return 0;
  811. }
  812. void hda_dsp_stream_free(struct snd_sof_dev *sdev)
  813. {
  814. struct hdac_bus *bus = sof_to_bus(sdev);
  815. struct hdac_stream *s, *_s;
  816. struct hdac_ext_stream *hext_stream;
  817. struct sof_intel_hda_stream *hda_stream;
  818. /* free position buffer */
  819. if (bus->posbuf.area)
  820. snd_dma_free_pages(&bus->posbuf);
  821. #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
  822. /* free position buffer */
  823. if (bus->rb.area)
  824. snd_dma_free_pages(&bus->rb);
  825. #endif
  826. list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
  827. /* TODO: decouple */
  828. /* free bdl buffer */
  829. if (s->bdl.area)
  830. snd_dma_free_pages(&s->bdl);
  831. list_del(&s->list);
  832. hext_stream = stream_to_hdac_ext_stream(s);
  833. hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
  834. hext_stream);
  835. devm_kfree(sdev->dev, hda_stream);
  836. }
  837. }
/*
 * hda_dsp_stream_get_position - read the current DMA position of a stream.
 * @hstream:   HDA stream to query
 * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
 * @can_sleep: true when the calling context may sleep (enables the short
 *             usleep_range() used by the legacy capture workaround)
 *
 * The position source (vendor DPIB register vs. DMA position buffer in
 * DDR) is selected by the module-level sof_hda_position_quirk setting.
 * Returns 0 for an unknown quirk value or when the raw position reaches
 * or exceeds the stream buffer size.
 */
snd_pcm_uframes_t hda_dsp_stream_get_position(struct hdac_stream *hstream,
					      int direction, bool can_sleep)
{
	struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);
	struct sof_intel_hda_stream *hda_stream = hstream_to_sof_hda_stream(hext_stream);
	struct snd_sof_dev *sdev = hda_stream->sdev;
	snd_pcm_uframes_t pos;

	switch (sof_hda_position_quirk) {
	case SOF_HDA_POSITION_QUIRK_USE_SKYLAKE_LEGACY:
		/*
		 * This legacy code, inherited from the Skylake driver,
		 * mixes DPIB registers and DPIB DDR updates and
		 * does not seem to follow any known hardware recommendations.
		 * It's not clear e.g. why there is a different flow
		 * for capture and playback, the only information that matters is
		 * what traffic class is used, and on all SOF-enabled platforms
		 * only VC0 is supported so the work-around was likely not necessary
		 * and quite possibly wrong.
		 */

		/* DPIB/posbuf position mode:
		 * For Playback, Use DPIB register from HDA space which
		 * reflects the actual data transferred.
		 * For Capture, Use the position buffer for pointer, as DPIB
		 * is not accurate enough, its update may be completed
		 * earlier than the data written to DDR.
		 */
		if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
			/* per-stream DPIB register: base + interval * index */
			pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
					       AZX_REG_VS_SDXDPIB_XBASE +
					       (AZX_REG_VS_SDXDPIB_XINTERVAL *
						hstream->index));
		} else {
			/*
			 * For capture stream, we need more workaround to fix the
			 * position incorrect issue:
			 *
			 * 1. Wait at least 20us before reading position buffer after
			 * the interrupt generated(IOC), to make sure position update
			 * happens on frame boundary i.e. 20.833uSec for 48KHz.
			 * 2. Perform a dummy Read to DPIB register to flush DMA
			 * position value.
			 * 3. Read the DMA Position from posbuf. Now the readback
			 * value should be >= period boundary.
			 */
			if (can_sleep)
				usleep_range(20, 21);

			/*
			 * dummy DPIB read: result intentionally discarded,
			 * the read itself flushes the DMA position update
			 */
			snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
					 AZX_REG_VS_SDXDPIB_XBASE +
					 (AZX_REG_VS_SDXDPIB_XINTERVAL *
					  hstream->index));
			pos = snd_hdac_stream_get_pos_posbuf(hstream);
		}
		break;
	case SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS:
		/*
		 * In case VC1 traffic is disabled this is the recommended option
		 */
		pos = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
				       AZX_REG_VS_SDXDPIB_XBASE +
				       (AZX_REG_VS_SDXDPIB_XINTERVAL *
					hstream->index));
		break;
	case SOF_HDA_POSITION_QUIRK_USE_DPIB_DDR_UPDATE:
		/*
		 * This is the recommended option when VC1 is enabled.
		 * While this isn't needed for SOF platforms it's added for
		 * consistency and debug.
		 */
		pos = snd_hdac_stream_get_pos_posbuf(hstream);
		break;
	default:
		/* log once to avoid flooding on repeated pointer queries */
		dev_err_once(sdev->dev, "hda_position_quirk value %d not supported\n",
			     sof_hda_position_quirk);
		pos = 0;
		break;
	}

	/* clamp out-of-range hardware readings back to the buffer start */
	if (pos >= hstream->bufsize)
		pos = 0;

	return pos;
}