// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
 *
 * lpass-platform.c -- ALSA SoC platform driver for QTi LPASS
 */

#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
#include <linux/regmap.h>
#include <sound/soc.h>

#include "lpass-lpaif-reg.h"
#include "lpass.h"

#define DRV_NAME "lpass-platform"

#define LPASS_PLATFORM_BUFFER_SIZE	(24 * 2 * 1024)
#define LPASS_PLATFORM_PERIODS		2
#define LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE	(8 * 1024)
#define LPASS_VA_CDC_DMA_LPM_BUFF_SIZE		(12 * 1024)
#define LPASS_CDC_DMA_REGISTER_FIELDS_MAX	15

static const struct snd_pcm_hardware lpass_platform_pcm_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16 |
		   SNDRV_PCM_FMTBIT_S24 |
		   SNDRV_PCM_FMTBIT_S32,
	.rates = SNDRV_PCM_RATE_8000_192000,
	.rate_min = 8000,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = LPASS_PLATFORM_BUFFER_SIZE,
	.period_bytes_max = LPASS_PLATFORM_BUFFER_SIZE /
		LPASS_PLATFORM_PERIODS,
	.period_bytes_min = LPASS_PLATFORM_BUFFER_SIZE /
		LPASS_PLATFORM_PERIODS,
	.periods_min = LPASS_PLATFORM_PERIODS,
	.periods_max = LPASS_PLATFORM_PERIODS,
	.fifo_size = 0,
};

static const struct snd_pcm_hardware lpass_platform_rxtx_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16 |
		   SNDRV_PCM_FMTBIT_S24 |
		   SNDRV_PCM_FMTBIT_S32,
	.rates = SNDRV_PCM_RATE_8000_192000,
	.rate_min = 8000,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE,
	.period_bytes_max = LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE /
		LPASS_PLATFORM_PERIODS,
	.period_bytes_min = LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE /
		LPASS_PLATFORM_PERIODS,
	.periods_min = LPASS_PLATFORM_PERIODS,
	.periods_max = LPASS_PLATFORM_PERIODS,
	.fifo_size = 0,
};

static const struct snd_pcm_hardware lpass_platform_va_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16 |
		   SNDRV_PCM_FMTBIT_S24 |
		   SNDRV_PCM_FMTBIT_S32,
	.rates = SNDRV_PCM_RATE_8000_192000,
	.rate_min = 8000,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = 8,
	.buffer_bytes_max = LPASS_VA_CDC_DMA_LPM_BUFF_SIZE,
	.period_bytes_max = LPASS_VA_CDC_DMA_LPM_BUFF_SIZE /
		LPASS_PLATFORM_PERIODS,
	.period_bytes_min = LPASS_VA_CDC_DMA_LPM_BUFF_SIZE /
		LPASS_PLATFORM_PERIODS,
	.periods_min = LPASS_PLATFORM_PERIODS,
	.periods_max = LPASS_PLATFORM_PERIODS,
	.fifo_size = 0,
};
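
/*
 * The helpers below allocate lpaif_dmactl descriptors for the read (RX) and
 * write (TX) DMA directions and bind their bit-fields to the variant-specific
 * DMA control registers with devm_regmap_field_bulk_alloc(), so the rest of
 * the driver can program them per channel via regmap_fields_write().
 */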

static int lpass_platform_alloc_rxtx_dmactl_fields(struct device *dev,
					struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl, *wr_dmactl;
	int rval;

	rd_dmactl = devm_kzalloc(dev, sizeof(*rd_dmactl), GFP_KERNEL);
	if (!rd_dmactl)
		return -ENOMEM;

	wr_dmactl = devm_kzalloc(dev, sizeof(*wr_dmactl), GFP_KERNEL);
	if (!wr_dmactl)
		return -ENOMEM;

	drvdata->rxtx_rd_dmactl = rd_dmactl;
	drvdata->rxtx_wr_dmactl = wr_dmactl;

	rval = devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->intf,
					    &v->rxtx_rdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
	if (rval)
		return rval;

	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->rxtx_wrdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
}

static int lpass_platform_alloc_va_dmactl_fields(struct device *dev,
					struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *wr_dmactl;

	wr_dmactl = devm_kzalloc(dev, sizeof(*wr_dmactl), GFP_KERNEL);
	if (!wr_dmactl)
		return -ENOMEM;

	drvdata->va_wr_dmactl = wr_dmactl;

	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->va_wrdma_intf, LPASS_CDC_DMA_REGISTER_FIELDS_MAX);
}

static int lpass_platform_alloc_dmactl_fields(struct device *dev,
					struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl, *wr_dmactl;
	int rval;

	drvdata->rd_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl),
					  GFP_KERNEL);
	if (drvdata->rd_dmactl == NULL)
		return -ENOMEM;

	drvdata->wr_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl),
					  GFP_KERNEL);
	if (drvdata->wr_dmactl == NULL)
		return -ENOMEM;

	rd_dmactl = drvdata->rd_dmactl;
	wr_dmactl = drvdata->wr_dmactl;

	rval = devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->intf,
					    &v->rdma_intf, 6);
	if (rval)
		return rval;

	return devm_regmap_field_bulk_alloc(dev, map, &wr_dmactl->intf,
					    &v->wrdma_intf, 6);
}

static int lpass_platform_alloc_hdmidmactl_fields(struct device *dev,
					struct regmap *map)
{
	struct lpass_data *drvdata = dev_get_drvdata(dev);
	struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *rd_dmactl;

	rd_dmactl = devm_kzalloc(dev, sizeof(struct lpaif_dmactl), GFP_KERNEL);
	if (rd_dmactl == NULL)
		return -ENOMEM;

	drvdata->hdmi_rd_dmactl = rd_dmactl;

	return devm_regmap_field_bulk_alloc(dev, map, &rd_dmactl->bursten,
					    &v->hdmi_rdma_bursten, 8);
}
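
/*
 * open: allocate per-stream lpass_pcm_data, request a DMA channel from the
 * variant (if it provides alloc_dma_channel), record the substream for the
 * interrupt handlers and apply the hardware constraints that match the DAI.
 */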

static int lpass_platform_pcmops_open(struct snd_soc_component *component,
				      struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct lpass_variant *v = drvdata->variant;
	int ret, dma_ch, dir = substream->stream;
	struct lpass_pcm_data *data;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	component->id = dai_id;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->i2s_port = cpu_dai->driver->id;
	runtime->private_data = data;

	if (v->alloc_dma_channel)
		dma_ch = v->alloc_dma_channel(drvdata, dir, dai_id);
	else
		dma_ch = 0;

	if (dma_ch < 0) {
		kfree(data);
		return dma_ch;
	}

	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		map = drvdata->lpaif_map;
		drvdata->substream[dma_ch] = substream;
		break;
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		drvdata->hdmi_substream[dma_ch] = substream;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		drvdata->rxtx_substream[dma_ch] = substream;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		drvdata->va_substream[dma_ch] = substream;
		break;
	default:
		break;
	}

	data->dma_ch = dma_ch;

	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
	case LPASS_DP_RX:
		ret = regmap_write(map, LPAIF_DMACTL_REG(v, dma_ch, dir, data->i2s_port), 0);
		if (ret) {
			kfree(data);
			dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
		runtime->dma_bytes = lpass_platform_pcm_hardware.buffer_bytes_max;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_rxtx_hardware);
		runtime->dma_bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		snd_soc_set_runtime_hwparams(substream, &lpass_platform_va_hardware);
		runtime->dma_bytes = lpass_platform_va_hardware.buffer_bytes_max;
		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
		break;
	default:
		break;
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0) {
		kfree(data);
		dev_err(soc_runtime->dev, "setting constraints failed: %d\n",
			ret);
		return -EINVAL;
	}

	return 0;
}

static int lpass_platform_pcmops_close(struct snd_soc_component *component,
				       struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct lpass_variant *v = drvdata->variant;
	struct lpass_pcm_data *data;
	unsigned int dai_id = cpu_dai->driver->id;

	data = runtime->private_data;
	switch (dai_id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		drvdata->substream[data->dma_ch] = NULL;
		break;
	case LPASS_DP_RX:
		drvdata->hdmi_substream[data->dma_ch] = NULL;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		drvdata->rxtx_substream[data->dma_ch] = NULL;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		drvdata->va_substream[data->dma_ch] = NULL;
		break;
	default:
		break;
	}
	if (v->free_dma_channel)
		v->free_dma_channel(drvdata, data->dma_ch, dai_id);

	kfree(data);
	return 0;
}
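
/*
 * Lookup helpers: map a substream to its DMA control descriptor, its
 * register-field index and its regmap, based on the CPU DAI id and the
 * stream direction.
 */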

static struct lpaif_dmactl *__lpass_get_dmactl_handle(const struct snd_pcm_substream *substream,
				     struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct lpaif_dmactl *dmactl = NULL;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			dmactl = drvdata->rd_dmactl;
		else
			dmactl = drvdata->wr_dmactl;
		break;
	case LPASS_DP_RX:
		dmactl = drvdata->hdmi_rd_dmactl;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		dmactl = drvdata->rxtx_rd_dmactl;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		dmactl = drvdata->rxtx_wr_dmactl;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		dmactl = drvdata->va_wr_dmactl;
		break;
	}

	return dmactl;
}

static int __lpass_get_id(const struct snd_pcm_substream *substream,
			  struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	struct lpass_variant *v = drvdata->variant;
	int id;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			id = pcm_data->dma_ch;
		else
			id = pcm_data->dma_ch - v->wrdma_channel_start;
		break;
	case LPASS_DP_RX:
		id = pcm_data->dma_ch;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		id = pcm_data->dma_ch;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		id = pcm_data->dma_ch - v->rxtx_wrdma_channel_start;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		id = pcm_data->dma_ch - v->va_wrdma_channel_start;
		break;
	}

	return id;
}

static struct regmap *__lpass_get_regmap_handle(const struct snd_pcm_substream *substream,
				     struct snd_soc_component *component)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map = NULL;

	switch (cpu_dai->driver->id) {
	case MI2S_PRIMARY ... MI2S_QUINARY:
		map = drvdata->lpaif_map;
		break;
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		break;
	}

	return map;
}
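
/*
 * hw_params: program burst enable, FIFO watermark, the audio interface mux
 * (for MI2S ports) and the words-per-sample count (WPSCNT) that matches the
 * requested bit width and channel count.
 */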

static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
					   struct snd_pcm_substream *substream,
					   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	struct lpass_variant *v = drvdata->variant;
	snd_pcm_format_t format = params_format(params);
	unsigned int channels = params_channels(params);
	unsigned int regval;
	struct lpaif_dmactl *dmactl;
	int id;
	int bitwidth;
	int ret, dma_port = pcm_data->i2s_port + v->dmactl_audif_start;
	unsigned int dai_id = cpu_dai->driver->id;

	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);

	bitwidth = snd_pcm_format_width(format);
	if (bitwidth < 0) {
		dev_err(soc_runtime->dev, "invalid bit width given: %d\n",
			bitwidth);
		return bitwidth;
	}

	ret = regmap_fields_write(dmactl->bursten, id, LPAIF_DMACTL_BURSTEN_INCR4);
	if (ret) {
		dev_err(soc_runtime->dev, "error updating bursten field: %d\n", ret);
		return ret;
	}

	ret = regmap_fields_write(dmactl->fifowm, id, LPAIF_DMACTL_FIFOWM_8);
	if (ret) {
		dev_err(soc_runtime->dev, "error updating fifowm field: %d\n", ret);
		return ret;
	}

	switch (dai_id) {
	case LPASS_DP_RX:
		ret = regmap_fields_write(dmactl->burst8, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating burst8en field: %d\n", ret);
			return ret;
		}
		ret = regmap_fields_write(dmactl->burst16, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating burst16en field: %d\n", ret);
			return ret;
		}
		ret = regmap_fields_write(dmactl->dynburst, id,
					  LPAIF_DMACTL_BURSTEN_INCR4);
		if (ret) {
			dev_err(soc_runtime->dev, "error updating dynbursten field: %d\n", ret);
			return ret;
		}
		break;
	case MI2S_PRIMARY:
	case MI2S_SECONDARY:
	case MI2S_TERTIARY:
	case MI2S_QUATERNARY:
	case MI2S_QUINARY:
		ret = regmap_fields_write(dmactl->intf, id,
					  LPAIF_DMACTL_AUDINTF(dma_port));
		if (ret) {
			dev_err(soc_runtime->dev, "error updating audio interface field: %d\n",
				ret);
			return ret;
		}
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		break;
	default:
		dev_err(soc_runtime->dev, "%s: invalid interface: %d\n", __func__, dai_id);
		break;
	}

	switch (bitwidth) {
	case 16:
		switch (channels) {
		case 1:
		case 2:
			regval = LPAIF_DMACTL_WPSCNT_ONE;
			break;
		case 4:
			regval = LPAIF_DMACTL_WPSCNT_TWO;
			break;
		case 6:
			regval = LPAIF_DMACTL_WPSCNT_THREE;
			break;
		case 8:
			regval = LPAIF_DMACTL_WPSCNT_FOUR;
			break;
		default:
			dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
				bitwidth, channels);
			return -EINVAL;
		}
		break;
	case 24:
	case 32:
		switch (channels) {
		case 1:
			regval = LPAIF_DMACTL_WPSCNT_ONE;
			break;
		case 2:
			regval = (dai_id == LPASS_DP_RX ?
				LPAIF_DMACTL_WPSCNT_ONE :
				LPAIF_DMACTL_WPSCNT_TWO);
			break;
		case 4:
			regval = (dai_id == LPASS_DP_RX ?
				LPAIF_DMACTL_WPSCNT_TWO :
				LPAIF_DMACTL_WPSCNT_FOUR);
			break;
		case 6:
			regval = (dai_id == LPASS_DP_RX ?
				LPAIF_DMACTL_WPSCNT_THREE :
				LPAIF_DMACTL_WPSCNT_SIX);
			break;
		case 8:
			regval = (dai_id == LPASS_DP_RX ?
				LPAIF_DMACTL_WPSCNT_FOUR :
				LPAIF_DMACTL_WPSCNT_EIGHT);
			break;
		default:
			dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
				bitwidth, channels);
			return -EINVAL;
		}
		break;
	default:
		dev_err(soc_runtime->dev, "invalid PCM config given: bw=%d, ch=%u\n",
			bitwidth, channels);
		return -EINVAL;
	}

	ret = regmap_fields_write(dmactl->wpscnt, id, regval);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to dmactl reg: %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int lpass_platform_pcmops_hw_free(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	struct lpass_variant *v = drvdata->variant;
	unsigned int reg;
	int ret;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	if (is_cdc_dma_port(dai_id))
		return 0;

	map = __lpass_get_regmap_handle(substream, component);
	reg = LPAIF_DMACTL_REG(v, pcm_data->dma_ch, substream->stream, dai_id);
	ret = regmap_write(map, reg, 0);
	if (ret)
		dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n",
			ret);

	return ret;
}
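
/*
 * prepare: program the DMA base address and the buffer/period lengths (in
 * 32-bit words, minus one), then enable the DMA channel.
 */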

static int lpass_platform_pcmops_prepare(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *dmactl;
	struct regmap *map;
	int ret, id, ch, dir = substream->stream;
	unsigned int dai_id = cpu_dai->driver->id;

	ch = pcm_data->dma_ch;

	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);
	map = __lpass_get_regmap_handle(substream, component);

	ret = regmap_write(map, LPAIF_DMABASE_REG(v, ch, dir, dai_id),
			   runtime->dma_addr);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmabase reg: %d\n",
			ret);
		return ret;
	}

	ret = regmap_write(map, LPAIF_DMABUFF_REG(v, ch, dir, dai_id),
			   (snd_pcm_lib_buffer_bytes(substream) >> 2) - 1);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmabuff reg: %d\n",
			ret);
		return ret;
	}

	ret = regmap_write(map, LPAIF_DMAPER_REG(v, ch, dir, dai_id),
			   (snd_pcm_lib_period_bytes(substream) >> 2) - 1);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmaper reg: %d\n",
			ret);
		return ret;
	}

	if (is_cdc_dma_port(dai_id)) {
		ret = regmap_fields_write(dmactl->fifowm, id, LPAIF_DMACTL_FIFOWM_8);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing fifowm field to dmactl reg: %d, id: %d\n",
				ret, id);
			return ret;
		}
	}

	ret = regmap_fields_write(dmactl->enable, id, LPAIF_DMACTL_ENABLE_ON);
	if (ret) {
		dev_err(soc_runtime->dev, "error writing to rdmactl reg: %d\n",
			ret);
		return ret;
	}

	return 0;
}
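
/*
 * trigger: on START/RESUME/PAUSE_RELEASE enable the DMA channel, clear any
 * pending interrupt status and unmask the channel interrupts; on
 * STOP/SUSPEND/PAUSE_PUSH disable the channel and update the port's
 * interrupt-enable register accordingly.
 */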

static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream,
					 int cmd)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	struct lpass_variant *v = drvdata->variant;
	struct lpaif_dmactl *dmactl;
	struct regmap *map;
	int ret, ch, id;
	unsigned int reg_irqclr = 0, val_irqclr = 0;
	unsigned int reg_irqen = 0, val_irqen = 0, val_mask = 0;
	unsigned int dai_id = cpu_dai->driver->id;

	ch = pcm_data->dma_ch;
	dmactl = __lpass_get_dmactl_handle(substream, component);
	id = __lpass_get_id(substream, component);
	map = __lpass_get_regmap_handle(substream, component);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = regmap_fields_write(dmactl->enable, id,
					  LPAIF_DMACTL_ENABLE_ON);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		switch (dai_id) {
		case LPASS_DP_RX:
			ret = regmap_fields_write(dmactl->dyncclk, id,
						  LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
			val_irqclr = (LPAIF_IRQ_ALL(ch) |
				      LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				      LPAIF_IRQ_HDMI_METADONE |
				      LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));

			reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
			val_mask = (LPAIF_IRQ_ALL(ch) |
				    LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				    LPAIF_IRQ_HDMI_METADONE |
				    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			val_irqen = (LPAIF_IRQ_ALL(ch) |
				     LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				     LPAIF_IRQ_HDMI_METADONE |
				     LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			break;
		case MI2S_PRIMARY:
		case MI2S_SECONDARY:
		case MI2S_TERTIARY:
		case MI2S_QUATERNARY:
		case MI2S_QUINARY:
			reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_ON);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		default:
			dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
			return -EINVAL;
		}

		ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
			return ret;
		}
		ret = regmap_update_bits(map, reg_irqen, val_mask, val_irqen);
		if (ret) {
			dev_err(soc_runtime->dev, "error writing to irqen reg: %d\n", ret);
			return ret;
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ret = regmap_fields_write(dmactl->enable, id,
					  LPAIF_DMACTL_ENABLE_OFF);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to rdmactl reg: %d\n", ret);
			return ret;
		}
		switch (dai_id) {
		case LPASS_DP_RX:
			ret = regmap_fields_write(dmactl->dyncclk, id,
						  LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg: %d\n", ret);
				return ret;
			}
			reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
			val_mask = (LPAIF_IRQ_ALL(ch) |
				    LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
				    LPAIF_IRQ_HDMI_METADONE |
				    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(ch));
			val_irqen = 0;
			break;
		case MI2S_PRIMARY:
		case MI2S_SECONDARY:
		case MI2S_TERTIARY:
		case MI2S_QUATERNARY:
		case MI2S_QUINARY:
			reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = 0;
			break;
		case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
			ret = regmap_fields_write(dmactl->dyncclk, id, LPAIF_DMACTL_DYNCLK_OFF);
			if (ret) {
				dev_err(soc_runtime->dev,
					"error writing to rdmactl reg field: %d\n", ret);
				return ret;
			}
			reg_irqclr = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
			val_irqclr = LPAIF_IRQ_ALL(ch);

			reg_irqen = LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
			val_mask = LPAIF_IRQ_ALL(ch);
			val_irqen = LPAIF_IRQ_ALL(ch);
			break;
		default:
			dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
			return -EINVAL;
		}

		ret = regmap_update_bits(map, reg_irqen, val_mask, val_irqen);
		if (ret) {
			dev_err(soc_runtime->dev,
				"error writing to irqen reg: %d\n", ret);
			return ret;
		}
		break;
	}

	return 0;
}
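
/*
 * pointer: the current position is the difference between the DMACURR and
 * DMABASE registers, converted from bytes to frames.
 */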

static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
		struct snd_soc_component *component,
		struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_runtime *rt = substream->runtime;
	struct lpass_pcm_data *pcm_data = rt->private_data;
	struct lpass_variant *v = drvdata->variant;
	unsigned int base_addr, curr_addr;
	int ret, ch, dir = substream->stream;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	map = __lpass_get_regmap_handle(substream, component);
	ch = pcm_data->dma_ch;

	ret = regmap_read(map,
			  LPAIF_DMABASE_REG(v, ch, dir, dai_id), &base_addr);
	if (ret) {
		dev_err(soc_runtime->dev,
			"error reading from rdmabase reg: %d\n", ret);
		return ret;
	}

	ret = regmap_read(map,
			  LPAIF_DMACURR_REG(v, ch, dir, dai_id), &curr_addr);
	if (ret) {
		dev_err(soc_runtime->dev,
			"error reading from rdmacurr reg: %d\n", ret);
		return ret;
	}

	return bytes_to_frames(substream->runtime, curr_addr - base_addr);
}
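
/*
 * For codec DMA ports the buffer lives in LPASS LPM memory, so userspace
 * mappings go through io_remap_pfn_range() with write-combine attributes;
 * all other ports use the default ALSA mmap helper.
 */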

static int lpass_platform_cdc_dma_mmap(struct snd_pcm_substream *substream,
				       struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	return io_remap_pfn_range(vma, vma->vm_start,
			(runtime->dma_addr + offset) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}

static int lpass_platform_pcmops_mmap(struct snd_soc_component *component,
				      struct snd_pcm_substream *substream,
				      struct vm_area_struct *vma)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	unsigned int dai_id = cpu_dai->driver->id;

	if (is_cdc_dma_port(dai_id))
		return lpass_platform_cdc_dma_mmap(substream, vma);

	return snd_pcm_lib_default_mmap(substream, vma);
}
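
/*
 * Common per-channel DMA interrupt handler: acknowledge the status bits in
 * the port's interrupt-clear register and signal period-elapsed, xrun or a
 * bus access error to the PCM core.
 */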

static irqreturn_t lpass_dma_interrupt_handler(
			struct snd_pcm_substream *substream,
			struct lpass_data *drvdata,
			int chan, u32 interrupts)
{
	struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	struct lpass_variant *v = drvdata->variant;
	irqreturn_t ret = IRQ_NONE;
	int rv;
	unsigned int reg, val, mask;
	struct regmap *map;
	unsigned int dai_id = cpu_dai->driver->id;

	mask = LPAIF_IRQ_ALL(chan);
	switch (dai_id) {
	case LPASS_DP_RX:
		map = drvdata->hdmiif_map;
		reg = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
		val = (LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(chan) |
		       LPAIF_IRQ_HDMI_METADONE |
		       LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(chan));
		break;
	case MI2S_PRIMARY:
	case MI2S_SECONDARY:
	case MI2S_TERTIARY:
	case MI2S_QUATERNARY:
	case MI2S_QUINARY:
		map = drvdata->lpaif_map;
		reg = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		map = drvdata->rxtx_lpaif_map;
		reg = LPAIF_RXTX_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		map = drvdata->va_lpaif_map;
		reg = LPAIF_VA_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
		val = 0;
		break;
	default:
		dev_err(soc_runtime->dev, "%s: invalid %d interface\n", __func__, dai_id);
		return -EINVAL;
	}

	if (interrupts & LPAIF_IRQ_PER(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		snd_pcm_period_elapsed(substream);
		ret = IRQ_HANDLED;
	}

	if (interrupts & LPAIF_IRQ_XRUN(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		dev_warn_ratelimited(soc_runtime->dev, "xrun warning\n");
		snd_pcm_stop_xrun(substream);
		ret = IRQ_HANDLED;
	}

	if (interrupts & LPAIF_IRQ_ERR(chan)) {
		rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		dev_err(soc_runtime->dev, "bus access error\n");
		snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
		ret = IRQ_HANDLED;
	}

	if (interrupts & val) {
		rv = regmap_write(map, reg, val);
		if (rv) {
			dev_err(soc_runtime->dev,
				"error writing to irqclear reg: %d\n", rv);
			return IRQ_NONE;
		}
		ret = IRQ_HANDLED;
	}

	return ret;
}
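
/*
 * Top-level interrupt handlers: read the per-port interrupt status register
 * and dispatch each active channel with an open substream to
 * lpass_dma_interrupt_handler().
 */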

static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	int rv, chan;

	rv = regmap_read(drvdata->lpaif_map,
			 LPAIF_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);
	if (rv) {
		pr_err("error reading from irqstat reg: %d\n", rv);
		return IRQ_NONE;
	}

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_hdmiif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	int rv, chan;

	rv = regmap_read(drvdata->hdmiif_map,
			 LPASS_HDMITX_APP_IRQSTAT_REG(v), &irqs);
	if (rv) {
		pr_err("error reading from irqstat reg: %d\n", rv);
		return IRQ_NONE;
	}

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_HDMI_DMA_CHANNELS; chan++) {
		if (irqs & (LPAIF_IRQ_ALL(chan) | LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(chan) |
			    LPAIF_IRQ_HDMI_METADONE |
			    LPAIF_IRQ_HDMI_SDEEP_AUD_DIS(chan))
			&& drvdata->hdmi_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->hdmi_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_rxtxif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	irqreturn_t rv;
	int chan;

	rv = regmap_read(drvdata->rxtx_lpaif_map,
			 LPAIF_RXTX_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_CDC_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->rxtx_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->rxtx_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t lpass_platform_vaif_irq(int irq, void *data)
{
	struct lpass_data *drvdata = data;
	struct lpass_variant *v = drvdata->variant;
	unsigned int irqs;
	irqreturn_t rv;
	int chan;

	rv = regmap_read(drvdata->va_lpaif_map,
			 LPAIF_VA_IRQSTAT_REG(v, LPAIF_IRQ_PORT_HOST), &irqs);

	/* Handle per channel interrupts */
	for (chan = 0; chan < LPASS_MAX_VA_CDC_DMA_CHANNELS; chan++) {
		if (irqs & LPAIF_IRQ_ALL(chan) && drvdata->va_substream[chan]) {
			rv = lpass_dma_interrupt_handler(
						drvdata->va_substream[chan],
						drvdata, chan, irqs);
			if (rv != IRQ_HANDLED)
				return rv;
		}
	}

	return IRQ_HANDLED;
}
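
/*
 * Codec DMA streams cannot use system memory; point the substream's DMA
 * buffer at the appropriate slice of the LPASS LPM region and map it for
 * CPU access with memremap(..., MEMREMAP_WC).
 */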

static int lpass_platform_prealloc_cdc_dma_buffer(struct snd_soc_component *component,
						  struct snd_pcm *pcm, int dai_id)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream)
		substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	else
		substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;

	buf = &substream->dma_buffer;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign Codec DMA buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;

	switch (dai_id) {
	case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
		buf->bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		buf->addr = drvdata->rxtx_cdc_dma_lpm_buf;
		break;
	case LPASS_CDC_DMA_TX0 ... LPASS_CDC_DMA_TX8:
		buf->bytes = lpass_platform_rxtx_hardware.buffer_bytes_max;
		buf->addr = drvdata->rxtx_cdc_dma_lpm_buf + LPASS_RXTX_CDC_DMA_LPM_BUFF_SIZE;
		break;
	case LPASS_CDC_DMA_VA_TX0 ... LPASS_CDC_DMA_VA_TX8:
		buf->bytes = lpass_platform_va_hardware.buffer_bytes_max;
		buf->addr = drvdata->va_cdc_dma_lpm_buf;
		break;
	default:
		break;
	}

	buf->area = (unsigned char * __force)memremap(buf->addr, buf->bytes, MEMREMAP_WC);

	return 0;
}

static int lpass_platform_pcm_new(struct snd_soc_component *component,
				  struct snd_soc_pcm_runtime *soc_runtime)
{
	struct snd_pcm *pcm = soc_runtime->pcm;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
	unsigned int dai_id = cpu_dai->driver->id;
	size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;

	/*
	 * Lpass codec dma can access only lpass lpm hardware memory.
	 * ioremap is for HLOS to access hardware memory.
	 */
	if (is_cdc_dma_port(dai_id))
		return lpass_platform_prealloc_cdc_dma_buffer(component, pcm, dai_id);

	return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
					    component->dev, size);
}
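
/*
 * System suspend/resume: switch the relevant regmap to cache-only mode and
 * mark it dirty on suspend, then sync the cached registers back to hardware
 * on resume.
 */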

static int lpass_platform_pcmops_suspend(struct snd_soc_component *component)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map;
	unsigned int dai_id = component->id;

	if (dai_id == LPASS_DP_RX)
		map = drvdata->hdmiif_map;
	else
		map = drvdata->lpaif_map;

	regcache_cache_only(map, true);
	regcache_mark_dirty(map);

	return 0;
}

static int lpass_platform_pcmops_resume(struct snd_soc_component *component)
{
	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
	struct regmap *map;
	unsigned int dai_id = component->id;

	if (dai_id == LPASS_DP_RX)
		map = drvdata->hdmiif_map;
	else
		map = drvdata->lpaif_map;

	regcache_cache_only(map, false);
	return regcache_sync(map);
}
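
/*
 * copy_user callback: codec DMA buffers are I/O memory, so transfers use
 * copy_from_user_toio()/copy_to_user_fromio(); other ports fall back to
 * plain copy_from_user()/copy_to_user().
 */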

static int lpass_platform_copy(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream, int channel,
			       unsigned long pos, void __user *buf, unsigned long bytes)
{
	struct snd_pcm_runtime *rt = substream->runtime;
	unsigned int dai_id = component->id;
	int ret = 0;

	void __iomem *dma_buf = (void __iomem *) (rt->dma_area + pos +
				channel * (rt->dma_bytes / rt->channels));

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_cdc_dma_port(dai_id)) {
			ret = copy_from_user_toio(dma_buf, buf, bytes);
		} else {
			if (copy_from_user((void __force *)dma_buf, buf, bytes))
				ret = -EFAULT;
		}
	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		if (is_cdc_dma_port(dai_id)) {
			ret = copy_to_user_fromio(buf, dma_buf, bytes);
		} else {
			if (copy_to_user(buf, (void __force *)dma_buf, bytes))
				ret = -EFAULT;
		}
	}

	return ret;
}

static const struct snd_soc_component_driver lpass_component_driver = {
	.name = DRV_NAME,
	.open = lpass_platform_pcmops_open,
	.close = lpass_platform_pcmops_close,
	.hw_params = lpass_platform_pcmops_hw_params,
	.hw_free = lpass_platform_pcmops_hw_free,
	.prepare = lpass_platform_pcmops_prepare,
	.trigger = lpass_platform_pcmops_trigger,
	.pointer = lpass_platform_pcmops_pointer,
	.mmap = lpass_platform_pcmops_mmap,
	.pcm_construct = lpass_platform_pcm_new,
	.suspend = lpass_platform_pcmops_suspend,
	.resume = lpass_platform_pcmops_resume,
	.copy_user = lpass_platform_copy,
};
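
/*
 * Entry point called by the SoC-specific LPASS drivers: disable and request
 * the LPAIF (and, when enabled, codec DMA, VA and HDMI) interrupts, allocate
 * the DMA control register fields and register the platform component.
 */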

int asoc_qcom_lpass_platform_register(struct platform_device *pdev)
{
	struct lpass_data *drvdata = platform_get_drvdata(pdev);
	struct lpass_variant *v = drvdata->variant;
	int ret;

	drvdata->lpaif_irq = platform_get_irq_byname(pdev, "lpass-irq-lpaif");
	if (drvdata->lpaif_irq < 0)
		return -ENODEV;

	/* ensure audio hardware is disabled */
	ret = regmap_write(drvdata->lpaif_map,
			   LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0);
	if (ret) {
		dev_err(&pdev->dev, "error writing to irqen reg: %d\n", ret);
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, drvdata->lpaif_irq,
			       lpass_platform_lpaif_irq, IRQF_TRIGGER_RISING,
			       "lpass-irq-lpaif", drvdata);
	if (ret) {
		dev_err(&pdev->dev, "irq request failed: %d\n", ret);
		return ret;
	}

	ret = lpass_platform_alloc_dmactl_fields(&pdev->dev,
						 drvdata->lpaif_map);
	if (ret) {
		dev_err(&pdev->dev,
			"error initializing dmactl fields: %d\n", ret);
		return ret;
	}

	if (drvdata->codec_dma_enable) {
		ret = regmap_write(drvdata->rxtx_lpaif_map,
				   LPAIF_RXTX_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0x0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to rxtx irqen reg: %d\n", ret);
			return ret;
		}

		ret = regmap_write(drvdata->va_lpaif_map,
				   LPAIF_VA_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST), 0x0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to va irqen reg: %d\n", ret);
			return ret;
		}

		drvdata->rxtxif_irq = platform_get_irq_byname(pdev, "lpass-irq-rxtxif");
		if (drvdata->rxtxif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->rxtxif_irq,
				       lpass_platform_rxtxif_irq, 0, "lpass-irq-rxtxif", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "rxtx irq request failed: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_rxtx_dmactl_fields(&pdev->dev,
							      drvdata->rxtx_lpaif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing rxtx dmactl fields: %d\n", ret);
			return ret;
		}

		drvdata->vaif_irq = platform_get_irq_byname(pdev, "lpass-irq-vaif");
		if (drvdata->vaif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->vaif_irq,
				       lpass_platform_vaif_irq, 0, "lpass-irq-vaif", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "va irq request failed: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_va_dmactl_fields(&pdev->dev,
							    drvdata->va_lpaif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing va dmactl fields: %d\n", ret);
			return ret;
		}
	}

	if (drvdata->hdmi_port_enable) {
		drvdata->hdmiif_irq = platform_get_irq_byname(pdev, "lpass-irq-hdmi");
		if (drvdata->hdmiif_irq < 0)
			return -ENODEV;

		ret = devm_request_irq(&pdev->dev, drvdata->hdmiif_irq,
				       lpass_platform_hdmiif_irq, 0, "lpass-irq-hdmi", drvdata);
		if (ret) {
			dev_err(&pdev->dev, "irq hdmi request failed: %d\n", ret);
			return ret;
		}

		ret = regmap_write(drvdata->hdmiif_map,
				   LPASS_HDMITX_APP_IRQEN_REG(v), 0);
		if (ret) {
			dev_err(&pdev->dev, "error writing to hdmi irqen reg: %d\n", ret);
			return ret;
		}

		ret = lpass_platform_alloc_hdmidmactl_fields(&pdev->dev,
							     drvdata->hdmiif_map);
		if (ret) {
			dev_err(&pdev->dev,
				"error initializing hdmidmactl fields: %d\n", ret);
			return ret;
		}
	}

	return devm_snd_soc_register_component(&pdev->dev,
					       &lpass_component_driver, NULL, 0);
}
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_platform_register);

MODULE_DESCRIPTION("QTi LPASS Platform Driver");
MODULE_LICENSE("GPL v2");