path.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. //
  3. // Copyright(c) 2021 Intel Corporation. All rights reserved.
  4. //
  5. // Authors: Cezary Rojewski <[email protected]>
  6. // Amadeusz Slawinski <[email protected]>
  7. //
  8. #include <sound/intel-nhlt.h>
  9. #include <sound/pcm_params.h>
  10. #include <sound/soc.h>
  11. #include "avs.h"
  12. #include "path.h"
  13. #include "topology.h"
  14. /* Must be called with adev->comp_list_mutex held. */
  15. static struct avs_tplg *
  16. avs_path_find_tplg(struct avs_dev *adev, const char *name)
  17. {
  18. struct avs_soc_component *acomp;
  19. list_for_each_entry(acomp, &adev->comp_list, node)
  20. if (!strcmp(acomp->tplg->name, name))
  21. return acomp->tplg;
  22. return NULL;
  23. }
  24. static struct avs_path_module *
  25. avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
  26. {
  27. struct avs_path_module *mod;
  28. list_for_each_entry(mod, &ppl->mod_list, node)
  29. if (mod->template->id == template_id)
  30. return mod;
  31. return NULL;
  32. }
  33. static struct avs_path_pipeline *
  34. avs_path_find_pipeline(struct avs_path *path, u32 template_id)
  35. {
  36. struct avs_path_pipeline *ppl;
  37. list_for_each_entry(ppl, &path->ppl_list, node)
  38. if (ppl->template->id == template_id)
  39. return ppl;
  40. return NULL;
  41. }
  42. static struct avs_path *
  43. avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
  44. {
  45. struct avs_tplg_path_template *pos, *template = NULL;
  46. struct avs_tplg *tplg;
  47. struct avs_path *path;
  48. tplg = avs_path_find_tplg(adev, name);
  49. if (!tplg)
  50. return NULL;
  51. list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
  52. if (pos->id == template_id) {
  53. template = pos;
  54. break;
  55. }
  56. }
  57. if (!template)
  58. return NULL;
  59. spin_lock(&adev->path_list_lock);
  60. /* Only one variant of given path template may be instantiated at a time. */
  61. list_for_each_entry(path, &adev->path_list, node) {
  62. if (path->template->owner == template) {
  63. spin_unlock(&adev->path_list_lock);
  64. return path;
  65. }
  66. }
  67. spin_unlock(&adev->path_list_lock);
  68. return NULL;
  69. }
  70. static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
  71. struct avs_audio_format *fmt)
  72. {
  73. return (params_rate(params) == fmt->sampling_freq &&
  74. params_channels(params) == fmt->num_channels &&
  75. params_physical_width(params) == fmt->bit_depth &&
  76. params_width(params) == fmt->valid_bit_depth);
  77. }
  78. static struct avs_tplg_path *
  79. avs_path_find_variant(struct avs_dev *adev,
  80. struct avs_tplg_path_template *template,
  81. struct snd_pcm_hw_params *fe_params,
  82. struct snd_pcm_hw_params *be_params)
  83. {
  84. struct avs_tplg_path *variant;
  85. list_for_each_entry(variant, &template->path_list, node) {
  86. dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
  87. variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
  88. variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
  89. dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
  90. variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
  91. variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);
  92. if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) &&
  93. variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt))
  94. return variant;
  95. }
  96. return NULL;
  97. }
  98. __maybe_unused
  99. static bool avs_dma_type_is_host(u32 dma_type)
  100. {
  101. return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
  102. dma_type == AVS_DMA_HDA_HOST_INPUT;
  103. }
  104. __maybe_unused
  105. static bool avs_dma_type_is_link(u32 dma_type)
  106. {
  107. return !avs_dma_type_is_host(dma_type);
  108. }
  109. __maybe_unused
  110. static bool avs_dma_type_is_output(u32 dma_type)
  111. {
  112. return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
  113. dma_type == AVS_DMA_HDA_LINK_OUTPUT ||
  114. dma_type == AVS_DMA_I2S_LINK_OUTPUT;
  115. }
  116. __maybe_unused
  117. static bool avs_dma_type_is_input(u32 dma_type)
  118. {
  119. return !avs_dma_type_is_output(dma_type);
  120. }
/*
 * Instantiate a COPIER module on the DSP.
 *
 * Builds an avs_copier_cfg payload from the module's topology template:
 * the gateway node_id is derived from the copier's dma_type and, for link
 * gateways, a hardware configuration BLOB is fetched from the NHLT ACPI
 * table and appended to the payload.
 *
 * Returns 0 on success, -ENOENT when no matching NHLT endpoint BLOB is
 * found, -ENOMEM on allocation failure, or the avs_dsp_init_module()
 * result.
 */
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct nhlt_acpi_table *nhlt = adev->nhlt;
	struct avs_tplg_module *t = mod->template;
	struct avs_copier_cfg *cfg;
	struct nhlt_specific_cfg *ep_blob;
	union avs_connector_node_id node_id = {0};
	size_t cfg_size, data_size = 0;
	void *data = NULL;
	u32 dma_type;
	int ret;

	dma_type = t->cfg_ext->copier.dma_type;
	node_id.dma_type = dma_type;

	switch (dma_type) {
	/* Scoped to the switch; only the link-gateway cases use these. */
	struct avs_audio_format *fmt;
	int direction;

	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		if (avs_dma_type_is_input(dma_type))
			direction = SNDRV_PCM_STREAM_CAPTURE;
		else
			direction = SNDRV_PCM_STREAM_PLAYBACK;

		/*
		 * Format used for the NHLT BLOB lookup: an explicit
		 * blob_fmt from topology wins; otherwise fall back on the
		 * direction-appropriate module format.
		 */
		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else if (direction == SNDRV_PCM_STREAM_CAPTURE)
			fmt = t->in_fmt;
		else
			fmt = t->cfg_ext->copier.out_fmt;

		ep_blob = intel_nhlt_get_endpoint_blob(adev->dev,
			nhlt, t->cfg_ext->copier.vindex.i2s.instance,
			NHLT_LINK_SSP, fmt->valid_bit_depth, fmt->bit_depth,
			fmt->num_channels, fmt->sampling_freq, direction,
			NHLT_DEVICE_I2S);
		if (!ep_blob) {
			dev_err(adev->dev, "no I2S ep_blob found\n");
			return -ENOENT;
		}

		data = ep_blob->caps;
		data_size = ep_blob->size;
		/* I2S gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;
	case AVS_DMA_DMIC_LINK_INPUT:
		direction = SNDRV_PCM_STREAM_CAPTURE;

		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else
			fmt = t->in_fmt;

		ep_blob = intel_nhlt_get_endpoint_blob(adev->dev, nhlt, 0,
				NHLT_LINK_DMIC, fmt->valid_bit_depth,
				fmt->bit_depth, fmt->num_channels,
				fmt->sampling_freq, direction, NHLT_DEVICE_DMIC);
		if (!ep_blob) {
			dev_err(adev->dev, "no DMIC ep_blob found\n");
			return -ENOENT;
		}

		data = ep_blob->caps;
		data_size = ep_blob->size;
		/* DMIC gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;
	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
		/* HOST gateway's vindex is dynamically assigned with DMA id */
		node_id.vindex = mod->owner->owner->dma_id;
		break;
	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		/* Combine the static topology vindex with the runtime DMA id. */
		node_id.vindex = t->cfg_ext->copier.vindex.val |
				 mod->owner->owner->dma_id;
		break;
	case INVALID_OBJECT_ID:
	default:
		node_id = INVALID_NODE_ID;
		break;
	}

	cfg_size = sizeof(*cfg) + data_size;
	/* Every config-BLOB contains gateway attributes. */
	if (data_size)
		cfg_size -= sizeof(cfg->gtw_cfg.config.attrs);

	cfg = kzalloc(cfg_size, GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->out_fmt = *t->cfg_ext->copier.out_fmt;
	cfg->feature_mask = t->cfg_ext->copier.feature_mask;
	cfg->gtw_cfg.node_id = node_id;
	cfg->gtw_cfg.dma_buffer_size = t->cfg_ext->copier.dma_buffer_size;
	/* config_length in DWORDs */
	cfg->gtw_cfg.config_length = DIV_ROUND_UP(data_size, 4);
	if (data)
		memcpy(&cfg->gtw_cfg.config, data, data_size);

	/* Cache gateway attributes for later use by the module. */
	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	kfree(cfg);
	return ret;
}
  224. static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
  225. {
  226. struct avs_tplg_module *t = mod->template;
  227. struct avs_updown_mixer_cfg cfg;
  228. int i;
  229. cfg.base.cpc = t->cfg_base->cpc;
  230. cfg.base.ibs = t->cfg_base->ibs;
  231. cfg.base.obs = t->cfg_base->obs;
  232. cfg.base.is_pages = t->cfg_base->is_pages;
  233. cfg.base.audio_fmt = *t->in_fmt;
  234. cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
  235. cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
  236. for (i = 0; i < AVS_CHANNELS_MAX; i++)
  237. cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
  238. cfg.channel_map = t->cfg_ext->updown_mix.channel_map;
  239. return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  240. t->core_id, t->domain, &cfg, sizeof(cfg),
  241. &mod->instance_id);
  242. }
  243. static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
  244. {
  245. struct avs_tplg_module *t = mod->template;
  246. struct avs_src_cfg cfg;
  247. cfg.base.cpc = t->cfg_base->cpc;
  248. cfg.base.ibs = t->cfg_base->ibs;
  249. cfg.base.obs = t->cfg_base->obs;
  250. cfg.base.is_pages = t->cfg_base->is_pages;
  251. cfg.base.audio_fmt = *t->in_fmt;
  252. cfg.out_freq = t->cfg_ext->src.out_freq;
  253. return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  254. t->core_id, t->domain, &cfg, sizeof(cfg),
  255. &mod->instance_id);
  256. }
  257. static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
  258. {
  259. struct avs_tplg_module *t = mod->template;
  260. struct avs_asrc_cfg cfg;
  261. cfg.base.cpc = t->cfg_base->cpc;
  262. cfg.base.ibs = t->cfg_base->ibs;
  263. cfg.base.obs = t->cfg_base->obs;
  264. cfg.base.is_pages = t->cfg_base->is_pages;
  265. cfg.base.audio_fmt = *t->in_fmt;
  266. cfg.out_freq = t->cfg_ext->asrc.out_freq;
  267. cfg.mode = t->cfg_ext->asrc.mode;
  268. cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;
  269. return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  270. t->core_id, t->domain, &cfg, sizeof(cfg),
  271. &mod->instance_id);
  272. }
  273. static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
  274. {
  275. struct avs_tplg_module *t = mod->template;
  276. struct avs_aec_cfg cfg;
  277. cfg.base.cpc = t->cfg_base->cpc;
  278. cfg.base.ibs = t->cfg_base->ibs;
  279. cfg.base.obs = t->cfg_base->obs;
  280. cfg.base.is_pages = t->cfg_base->is_pages;
  281. cfg.base.audio_fmt = *t->in_fmt;
  282. cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
  283. cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
  284. cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;
  285. return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  286. t->core_id, t->domain, &cfg, sizeof(cfg),
  287. &mod->instance_id);
  288. }
  289. static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
  290. {
  291. struct avs_tplg_module *t = mod->template;
  292. struct avs_mux_cfg cfg;
  293. cfg.base.cpc = t->cfg_base->cpc;
  294. cfg.base.ibs = t->cfg_base->ibs;
  295. cfg.base.obs = t->cfg_base->obs;
  296. cfg.base.is_pages = t->cfg_base->is_pages;
  297. cfg.base.audio_fmt = *t->in_fmt;
  298. cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
  299. cfg.out_fmt = *t->cfg_ext->mux.out_fmt;
  300. return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  301. t->core_id, t->domain, &cfg, sizeof(cfg),
  302. &mod->instance_id);
  303. }
  304. static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
  305. {
  306. struct avs_tplg_module *t = mod->template;
  307. struct avs_wov_cfg cfg;
  308. cfg.base.cpc = t->cfg_base->cpc;
  309. cfg.base.ibs = t->cfg_base->ibs;
  310. cfg.base.obs = t->cfg_base->obs;
  311. cfg.base.is_pages = t->cfg_base->is_pages;
  312. cfg.base.audio_fmt = *t->in_fmt;
  313. cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;
  314. return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  315. t->core_id, t->domain, &cfg, sizeof(cfg),
  316. &mod->instance_id);
  317. }
  318. static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
  319. {
  320. struct avs_tplg_module *t = mod->template;
  321. struct avs_micsel_cfg cfg;
  322. cfg.base.cpc = t->cfg_base->cpc;
  323. cfg.base.ibs = t->cfg_base->ibs;
  324. cfg.base.obs = t->cfg_base->obs;
  325. cfg.base.is_pages = t->cfg_base->is_pages;
  326. cfg.base.audio_fmt = *t->in_fmt;
  327. cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;
  328. return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  329. t->core_id, t->domain, &cfg, sizeof(cfg),
  330. &mod->instance_id);
  331. }
  332. static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
  333. {
  334. struct avs_tplg_module *t = mod->template;
  335. struct avs_modcfg_base cfg;
  336. cfg.cpc = t->cfg_base->cpc;
  337. cfg.ibs = t->cfg_base->ibs;
  338. cfg.obs = t->cfg_base->obs;
  339. cfg.is_pages = t->cfg_base->is_pages;
  340. cfg.audio_fmt = *t->in_fmt;
  341. return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  342. t->core_id, t->domain, &cfg, sizeof(cfg),
  343. &mod->instance_id);
  344. }
  345. static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
  346. {
  347. struct avs_tplg_module *t = mod->template;
  348. struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
  349. struct avs_modcfg_ext *cfg;
  350. size_t cfg_size, num_pins;
  351. int ret, i;
  352. num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
  353. cfg_size = sizeof(*cfg) + sizeof(*cfg->pin_fmts) * num_pins;
  354. cfg = kzalloc(cfg_size, GFP_KERNEL);
  355. if (!cfg)
  356. return -ENOMEM;
  357. cfg->base.cpc = t->cfg_base->cpc;
  358. cfg->base.ibs = t->cfg_base->ibs;
  359. cfg->base.obs = t->cfg_base->obs;
  360. cfg->base.is_pages = t->cfg_base->is_pages;
  361. cfg->base.audio_fmt = *t->in_fmt;
  362. cfg->num_input_pins = tcfg->generic.num_input_pins;
  363. cfg->num_output_pins = tcfg->generic.num_output_pins;
  364. /* configure pin formats */
  365. for (i = 0; i < num_pins; i++) {
  366. struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
  367. struct avs_pin_format *pin = &cfg->pin_fmts[i];
  368. pin->pin_index = tpin->pin_index;
  369. pin->iobs = tpin->iobs;
  370. pin->audio_fmt = *tpin->fmt;
  371. }
  372. ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
  373. t->core_id, t->domain, cfg, cfg_size,
  374. &mod->instance_id);
  375. kfree(cfg);
  376. return ret;
  377. }
  378. static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
  379. {
  380. dev_err(adev->dev, "Probe module can't be instantiated by topology");
  381. return -EINVAL;
  382. }
/* Maps a module-type UUID to the constructor that builds its IPC payload. */
struct avs_module_create {
	guid_t *guid;
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};
/*
 * Constructor dispatch table, consulted by avs_path_module_type_create().
 * Types absent from this table fall back on avs_modext_create().
 */
static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
};
  401. static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
  402. {
  403. const guid_t *type = &mod->template->cfg_ext->type;
  404. for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
  405. if (guid_equal(type, avs_module_create[i].guid))
  406. return avs_module_create[i].create(adev, mod);
  407. return avs_modext_create(adev, mod);
  408. }
/* Release the host-side module descriptor; DSP-side cleanup is separate. */
static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(mod);
}
  413. static struct avs_path_module *
  414. avs_path_module_create(struct avs_dev *adev,
  415. struct avs_path_pipeline *owner,
  416. struct avs_tplg_module *template)
  417. {
  418. struct avs_path_module *mod;
  419. int module_id, ret;
  420. module_id = avs_get_module_id(adev, &template->cfg_ext->type);
  421. if (module_id < 0)
  422. return ERR_PTR(module_id);
  423. mod = kzalloc(sizeof(*mod), GFP_KERNEL);
  424. if (!mod)
  425. return ERR_PTR(-ENOMEM);
  426. mod->template = template;
  427. mod->module_id = module_id;
  428. mod->owner = owner;
  429. INIT_LIST_HEAD(&mod->node);
  430. ret = avs_path_module_type_create(adev, mod);
  431. if (ret) {
  432. dev_err(adev->dev, "module-type create failed: %d\n", ret);
  433. kfree(mod);
  434. return ERR_PTR(ret);
  435. }
  436. return mod;
  437. }
  438. static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
  439. {
  440. struct avs_path_module *this_mod, *target_mod;
  441. struct avs_path_pipeline *target_ppl;
  442. struct avs_path *target_path;
  443. struct avs_tplg_binding *t;
  444. t = binding->template;
  445. this_mod = avs_path_find_module(binding->owner,
  446. t->mod_id);
  447. if (!this_mod) {
  448. dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
  449. return -EINVAL;
  450. }
  451. /* update with target_tplg_name too */
  452. target_path = avs_path_find_path(adev, t->target_tplg_name,
  453. t->target_path_tmpl_id);
  454. if (!target_path) {
  455. dev_err(adev->dev, "target path %s:%d not found\n",
  456. t->target_tplg_name, t->target_path_tmpl_id);
  457. return -EINVAL;
  458. }
  459. target_ppl = avs_path_find_pipeline(target_path,
  460. t->target_ppl_id);
  461. if (!target_ppl) {
  462. dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
  463. return -EINVAL;
  464. }
  465. target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
  466. if (!target_mod) {
  467. dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
  468. return -EINVAL;
  469. }
  470. if (t->is_sink) {
  471. binding->sink = this_mod;
  472. binding->sink_pin = t->mod_pin;
  473. binding->source = target_mod;
  474. binding->source_pin = t->target_mod_pin;
  475. } else {
  476. binding->sink = target_mod;
  477. binding->sink_pin = t->target_mod_pin;
  478. binding->source = this_mod;
  479. binding->source_pin = t->mod_pin;
  480. }
  481. return 0;
  482. }
/* Release a binding descriptor; endpoints it referenced are owned elsewhere. */
static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(binding);
}
  487. static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
  488. struct avs_path_pipeline *owner,
  489. struct avs_tplg_binding *t)
  490. {
  491. struct avs_path_binding *binding;
  492. binding = kzalloc(sizeof(*binding), GFP_KERNEL);
  493. if (!binding)
  494. return ERR_PTR(-ENOMEM);
  495. binding->template = t;
  496. binding->owner = owner;
  497. INIT_LIST_HEAD(&binding->node);
  498. return binding;
  499. }
  500. static int avs_path_pipeline_arm(struct avs_dev *adev,
  501. struct avs_path_pipeline *ppl)
  502. {
  503. struct avs_path_module *mod;
  504. list_for_each_entry(mod, &ppl->mod_list, node) {
  505. struct avs_path_module *source, *sink;
  506. int ret;
  507. /*
  508. * Only one module (so it's implicitly last) or it is the last
  509. * one, either way we don't have next module to bind it to.
  510. */
  511. if (mod == list_last_entry(&ppl->mod_list,
  512. struct avs_path_module, node))
  513. break;
  514. /* bind current module to next module on list */
  515. source = mod;
  516. sink = list_next_entry(mod, node);
  517. if (!source || !sink)
  518. return -EINVAL;
  519. ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
  520. sink->module_id, sink->instance_id, 0, 0);
  521. if (ret)
  522. return AVS_IPC_RET(ret);
  523. }
  524. return 0;
  525. }
/*
 * Tear down a pipeline: free host-side bindings, delete the DSP pipeline,
 * then delete and free each owned module. Finally unlink and free the
 * pipeline descriptor itself.
 */
static void avs_path_pipeline_free(struct avs_dev *adev,
				   struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	/* Bindings are host-side only; no IPC needed to drop them. */
	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(&binding->node);
		avs_path_binding_free(adev, binding);
	}

	/* Delete the DSP pipeline before unloading its modules. */
	avs_dsp_delete_pipeline(adev, ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
				      mod->owner->instance_id,
				      mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(&ppl->node);
	kfree(ppl);
}
/*
 * Create a pipeline on the DSP and instantiate all of its modules and
 * binding descriptors from the topology template.
 *
 * On any mid-way failure, avs_path_pipeline_free() unwinds everything
 * created so far — this relies on modules/bindings being list-linked
 * immediately after successful creation.
 *
 * Returns a valid pointer or an ERR_PTR() on failure.
 */
static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
			 struct avs_tplg_pipeline *template)
{
	struct avs_path_pipeline *ppl;
	struct avs_tplg_pplcfg *cfg = template->cfg;
	struct avs_tplg_module *tmod;
	int ret, i;

	ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return ERR_PTR(-ENOMEM);

	ppl->template = template;
	ppl->owner = owner;
	INIT_LIST_HEAD(&ppl->binding_list);
	INIT_LIST_HEAD(&ppl->mod_list);
	INIT_LIST_HEAD(&ppl->node);

	ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
				      cfg->lp, cfg->attributes,
				      &ppl->instance_id);
	if (ret) {
		dev_err(adev->dev, "error creating pipeline %d\n", ret);
		kfree(ppl);
		return ERR_PTR(ret);
	}

	list_for_each_entry(tmod, &template->mod_list, node) {
		struct avs_path_module *mod;

		mod = avs_path_module_create(adev, ppl, tmod);
		if (IS_ERR(mod)) {
			ret = PTR_ERR(mod);
			dev_err(adev->dev, "error creating module %d\n", ret);
			goto init_err;
		}

		list_add_tail(&mod->node, &ppl->mod_list);
	}

	for (i = 0; i < template->num_bindings; i++) {
		struct avs_path_binding *binding;

		binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
		if (IS_ERR(binding)) {
			ret = PTR_ERR(binding);
			dev_err(adev->dev, "error creating binding %d\n", ret);
			goto init_err;
		}

		list_add_tail(&binding->node, &ppl->binding_list);
	}

	return ppl;

init_err:
	avs_path_pipeline_free(adev, ppl);
	return ERR_PTR(ret);
}
  595. static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
  596. struct avs_tplg_path *template, u32 dma_id)
  597. {
  598. struct avs_tplg_pipeline *tppl;
  599. path->owner = adev;
  600. path->template = template;
  601. path->dma_id = dma_id;
  602. INIT_LIST_HEAD(&path->ppl_list);
  603. INIT_LIST_HEAD(&path->node);
  604. /* create all the pipelines */
  605. list_for_each_entry(tppl, &template->ppl_list, node) {
  606. struct avs_path_pipeline *ppl;
  607. ppl = avs_path_pipeline_create(adev, path, tppl);
  608. if (IS_ERR(ppl))
  609. return PTR_ERR(ppl);
  610. list_add_tail(&ppl->node, &path->ppl_list);
  611. }
  612. spin_lock(&adev->path_list_lock);
  613. list_add_tail(&path->node, &adev->path_list);
  614. spin_unlock(&adev->path_list_lock);
  615. return 0;
  616. }
  617. static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
  618. {
  619. struct avs_path_pipeline *ppl;
  620. struct avs_path_binding *binding;
  621. int ret;
  622. list_for_each_entry(ppl, &path->ppl_list, node) {
  623. /*
  624. * Arm all ppl bindings before binding internal modules
  625. * as it costs no IPCs which isn't true for the latter.
  626. */
  627. list_for_each_entry(binding, &ppl->binding_list, node) {
  628. ret = avs_path_binding_arm(adev, binding);
  629. if (ret < 0)
  630. return ret;
  631. }
  632. ret = avs_path_pipeline_arm(adev, ppl);
  633. if (ret < 0)
  634. return ret;
  635. }
  636. return 0;
  637. }
/*
 * Tear down a path and all of its pipelines.
 * Caller must hold adev->path_mutex.
 */
static void avs_path_free_unlocked(struct avs_path *path)
{
	struct avs_path_pipeline *ppl, *save;

	/* Unlist first so concurrent lookups no longer see the path. */
	spin_lock(&path->owner->path_list_lock);
	list_del(&path->node);
	spin_unlock(&path->owner->path_list_lock);

	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
		avs_path_pipeline_free(path->owner, ppl);

	kfree(path);
}
  648. static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
  649. struct avs_tplg_path *template)
  650. {
  651. struct avs_path *path;
  652. int ret;
  653. path = kzalloc(sizeof(*path), GFP_KERNEL);
  654. if (!path)
  655. return ERR_PTR(-ENOMEM);
  656. ret = avs_path_init(adev, path, template, dma_id);
  657. if (ret < 0)
  658. goto err;
  659. ret = avs_path_arm(adev, path);
  660. if (ret < 0)
  661. goto err;
  662. path->state = AVS_PPL_STATE_INVALID;
  663. return path;
  664. err:
  665. avs_path_free_unlocked(path);
  666. return ERR_PTR(ret);
  667. }
/* Destroy a path, serialized against concurrent path creation. */
void avs_path_free(struct avs_path *path)
{
	struct avs_dev *adev = path->owner;

	mutex_lock(&adev->path_mutex);
	avs_path_free_unlocked(path);
	mutex_unlock(&adev->path_mutex);
}
/*
 * Instantiate a path from the template variant matching given FE and BE
 * hw_params.
 *
 * Returns a valid pointer, ERR_PTR(-ENOENT) when no variant matches, or
 * the error propagated from path creation.
 */
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize path and its components creation. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);

	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}
  696. static int avs_path_bind_prepare(struct avs_dev *adev,
  697. struct avs_path_binding *binding)
  698. {
  699. const struct avs_audio_format *src_fmt, *sink_fmt;
  700. struct avs_tplg_module *tsource = binding->source->template;
  701. struct avs_path_module *source = binding->source;
  702. int ret;
  703. /*
  704. * only copier modules about to be bound
  705. * to output pin other than 0 need preparation
  706. */
  707. if (!binding->source_pin)
  708. return 0;
  709. if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
  710. return 0;
  711. src_fmt = tsource->in_fmt;
  712. sink_fmt = binding->sink->template->in_fmt;
  713. ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
  714. source->instance_id, binding->source_pin,
  715. src_fmt, sink_fmt);
  716. if (ret) {
  717. dev_err(adev->dev, "config copier failed: %d\n", ret);
  718. return AVS_IPC_RET(ret);
  719. }
  720. return 0;
  721. }
  722. int avs_path_bind(struct avs_path *path)
  723. {
  724. struct avs_path_pipeline *ppl;
  725. struct avs_dev *adev = path->owner;
  726. int ret;
  727. list_for_each_entry(ppl, &path->ppl_list, node) {
  728. struct avs_path_binding *binding;
  729. list_for_each_entry(binding, &ppl->binding_list, node) {
  730. struct avs_path_module *source, *sink;
  731. source = binding->source;
  732. sink = binding->sink;
  733. ret = avs_path_bind_prepare(adev, binding);
  734. if (ret < 0)
  735. return ret;
  736. ret = avs_ipc_bind(adev, source->module_id,
  737. source->instance_id, sink->module_id,
  738. sink->instance_id, binding->sink_pin,
  739. binding->source_pin);
  740. if (ret) {
  741. dev_err(adev->dev, "bind path failed: %d\n", ret);
  742. return AVS_IPC_RET(ret);
  743. }
  744. }
  745. }
  746. return 0;
  747. }
  748. int avs_path_unbind(struct avs_path *path)
  749. {
  750. struct avs_path_pipeline *ppl;
  751. struct avs_dev *adev = path->owner;
  752. int ret;
  753. list_for_each_entry(ppl, &path->ppl_list, node) {
  754. struct avs_path_binding *binding;
  755. list_for_each_entry(binding, &ppl->binding_list, node) {
  756. struct avs_path_module *source, *sink;
  757. source = binding->source;
  758. sink = binding->sink;
  759. ret = avs_ipc_unbind(adev, source->module_id,
  760. source->instance_id, sink->module_id,
  761. sink->instance_id, binding->sink_pin,
  762. binding->source_pin);
  763. if (ret) {
  764. dev_err(adev->dev, "unbind path failed: %d\n", ret);
  765. return AVS_IPC_RET(ret);
  766. }
  767. }
  768. }
  769. return 0;
  770. }
  771. int avs_path_reset(struct avs_path *path)
  772. {
  773. struct avs_path_pipeline *ppl;
  774. struct avs_dev *adev = path->owner;
  775. int ret;
  776. if (path->state == AVS_PPL_STATE_RESET)
  777. return 0;
  778. list_for_each_entry(ppl, &path->ppl_list, node) {
  779. ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
  780. AVS_PPL_STATE_RESET);
  781. if (ret) {
  782. dev_err(adev->dev, "reset path failed: %d\n", ret);
  783. path->state = AVS_PPL_STATE_INVALID;
  784. return AVS_IPC_RET(ret);
  785. }
  786. }
  787. path->state = AVS_PPL_STATE_RESET;
  788. return 0;
  789. }
  790. int avs_path_pause(struct avs_path *path)
  791. {
  792. struct avs_path_pipeline *ppl;
  793. struct avs_dev *adev = path->owner;
  794. int ret;
  795. if (path->state == AVS_PPL_STATE_PAUSED)
  796. return 0;
  797. list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
  798. ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
  799. AVS_PPL_STATE_PAUSED);
  800. if (ret) {
  801. dev_err(adev->dev, "pause path failed: %d\n", ret);
  802. path->state = AVS_PPL_STATE_INVALID;
  803. return AVS_IPC_RET(ret);
  804. }
  805. }
  806. path->state = AVS_PPL_STATE_PAUSED;
  807. return 0;
  808. }
  809. int avs_path_run(struct avs_path *path, int trigger)
  810. {
  811. struct avs_path_pipeline *ppl;
  812. struct avs_dev *adev = path->owner;
  813. int ret;
  814. if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
  815. return 0;
  816. list_for_each_entry(ppl, &path->ppl_list, node) {
  817. if (ppl->template->cfg->trigger != trigger)
  818. continue;
  819. ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
  820. AVS_PPL_STATE_RUNNING);
  821. if (ret) {
  822. dev_err(adev->dev, "run path failed: %d\n", ret);
  823. path->state = AVS_PPL_STATE_INVALID;
  824. return AVS_IPC_RET(ret);
  825. }
  826. }
  827. path->state = AVS_PPL_STATE_RUNNING;
  828. return 0;
  829. }