// SPDX-License-Identifier: GPL-2.0-only
/*
 *  skl-topology.c - Implements Platform component ALSA controls/widget
 *  handlers.
 *
 *  Copyright (C) 2014-2015 Intel Corp
 *  Author: Jeeja KP <[email protected]>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/intel-nhlt.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)

static const int mic_mono_list[] = {
0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
{0, 1, 2, 3},
};

#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))

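/*
 * Reference counting of stream D0i3 capabilities: each active stream bumps
 * the counter matching its capability (none, streaming or non-streaming).
 */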
void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;
	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;
	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;
	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;
	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether the SKL driver handles a given widget.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
				  struct device *dev)
{
	if (w->dapm->dev != dev)
		return false;

	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_output:
	case snd_soc_dapm_mux:
		return false;
	default:
		return true;
	}
}

static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];

	dev_dbg(skl->dev, "Dumping config\n");
	dev_dbg(skl->dev, "Input Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
		iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "Output Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
		iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}

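/*
 * ch_map packs one 4-bit slot per channel (up to 8 slots in 32 bits);
 * unused slots are left as 0xF, see the 2-channel example below.
 */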
static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16-bit samples use a 16-bit container, whereas 24-bit
		 * samples are carried in a 32-bit container, so update the
		 * container bit depth accordingly.
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;
		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}
}

/*
 * A pipeline may contain modules which impact the PCM parameters, like an
 * SRC, channel converter or format converter.
 * We need to calculate the output params by applying a 'fixup'. The
 * topology tells the driver which type of fixup to apply by supplying a
 * fixup mask, and based on that we calculate the output format.
 *
 * For an FE, the PCM hw_params is the source/target format; the same holds
 * for a BE once its hw_params has been invoked. Based on whether the
 * pipeline is FE or BE and on the stream direction, we derive the input and
 * output fixups and then apply them to the module.
 */
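/*
 * Illustration: on a playback FE, every requested fixup is applied to the
 * module's input pin, while out_fixup = ~converter & params_fixup, so any
 * parameter the module itself converts keeps its topology-defined value on
 * the output pin.
 */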
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
	out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which depend on the PCM params.
 * So once the params have been calculated, the buffer sizes need to be
 * recalculated as well.
 */
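/*
 * Illustrative example: a 48 kHz, 2-channel stream in a 32-bit container
 * gives ibs = 48 * 2 * 4 = 384 bytes per 1 ms, times 5 for an SRC module.
 */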
static void skl_tplg_update_buffer_size(struct skl_dev *skl,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	struct skl_module_res *res;

	/*
	 * Since fixups are applied to pin 0 only, ibs and obs need to be
	 * updated for pin 0 only.
	 */
	res = &mcfg->module->resources[mcfg->res_idx];
	in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
	out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}

static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;
	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;
	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;
	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt, s_cont;
	struct nhlt_specific_cfg *cfg;
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
	int fmt_idx = m_cfg->fmt_idx;
	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];

	/* check if we already have blob */
	if (m_cfg->formats_config[SKL_PARAM_INIT].caps_size > 0)
		return 0;

	dev_dbg(skl->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_iface->inputs[0].fmt.s_freq;
		s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
		s_cont = m_iface->inputs[0].fmt.bit_depth;
		ch = m_iface->inputs[0].fmt.channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_iface->outputs[0].fmt.s_freq;
			s_fmt = m_iface->outputs[0].fmt.valid_bit_depth;
			s_cont = m_iface->outputs[0].fmt.bit_depth;
			ch = m_iface->outputs[0].fmt.channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_iface->inputs[0].fmt.s_freq;
			s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
			s_cont = m_iface->inputs[0].fmt.bit_depth;
			ch = m_iface->inputs[0].fmt.channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = intel_nhlt_get_endpoint_blob(skl->dev, skl->nhlt, m_cfg->vbus_id,
					   link_type, s_fmt, s_cont, ch,
					   s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
		m_cfg->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
	} else {
		dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
			m_cfg->vbus_id, link_type, dir);
		dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d/%d\n",
			ch, s_freq, s_fmt, s_cont);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
					  struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE updation\n",
		w->name);

	skl_dump_mconfig(skl, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(skl, m_cfg);

	dev_dbg(skl->dev, "Mconfig for widget=%s AFTER updation\n",
		w->name);

	skl_dump_mconfig(skl, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls which need
 * to be applied after the module is initialized. If the set_param flag is
 * set to SKL_PARAM_SET, those params are sent once the module has been
 * initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config[SKL_PARAM_SET].caps_size > 0 &&
	    mconfig->formats_config[SKL_PARAM_SET].set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config[SKL_PARAM_SET];
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(skl,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls and are required when
 * the module is initialized. Such a param is identified by its set_param
 * flag: when the flag is SKL_PARAM_INIT, the parameter is applied as part
 * of module init.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config[SKL_PARAM_INIT].caps =
							(u32 *)bc->params;
			mconfig->formats_config[SKL_PARAM_INIT].caps_size =
								bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke it for all modules in
 * the pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	u8 cfg_idx;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->dev,
					"module %pUL id not populated\n",
					(guid_t *)mconfig->guid);
			return -EIO;
		}

		cfg_idx = mconfig->pipe->cur_config_idx;
		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

		if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) {
			ret = skl->dsp->fw_ops.load_mod(skl->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(skl, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, skl);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, skl);
		uuid_mod = (guid_t *)mconfig->guid;
		mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod,
						mconfig->id.instance_id);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);

		ret = skl_dsp_get_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			dev_err(skl->dev, "Failed to wake up core %d ret=%d\n",
						mconfig->core_id, ret);
			return ret;
		}

		ret = skl_init_module(skl, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
			goto err;
		}

		ret = skl_tplg_set_module_params(w, skl);
		if (ret < 0)
			goto err;
	}

	return 0;
err:
	skl_dsp_put_core(skl->dsp, mconfig->core_id);
	return ret;
}

static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
					struct skl_pipe *pipe)
{
	int ret = 0;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *mconfig;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		mconfig = w_module->w->priv;
		uuid_mod = (guid_t *)mconfig->guid;

		if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod) {
			ret = skl->dsp->fw_ops.unload_mod(skl->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);

		ret = skl_dsp_put_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			/* don't return; continue with other modules */
			dev_err(skl->dev, "Failed to sleep core %d ret=%d\n",
				mconfig->core_id, ret);
		}
	}

	/* no modules to unload in this path, so return */
	return ret;
}

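/*
 * A pipe counts as multi-format when it exposes more than one config: any
 * non-FE pipe with several configs is treated as such, while an FE pipe is
 * only multi-format if adjacent configs actually differ in ch/freq/bps.
 */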
static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_fmt *cur_fmt;
	struct skl_pipe_fmt *next_fmt;
	int i;

	if (pipe->nr_cfgs <= 1)
		return false;

	if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return true;

	for (i = 0; i < pipe->nr_cfgs - 1; i++) {
		if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) {
			cur_fmt = &pipe->configs[i].out_fmt;
			next_fmt = &pipe->configs[i + 1].out_fmt;
		} else {
			cur_fmt = &pipe->configs[i].in_fmt;
			next_fmt = &pipe->configs[i + 1].in_fmt;
		}

		if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq,
				     cur_fmt->bps,
				     next_fmt->channels,
				     next_fmt->freq,
				     next_fmt->bps))
			return true;
	}

	return false;
}

/*
 * Here we select the pipe format based on the pipe type and pipe
 * direction to determine the current config index for the pipeline.
 * The config index is then used to select the proper module resources.
 * Intermediate pipes currently have a fixed format, hence we select the
 * 0th configuration by default for such pipes.
 */
static int
skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
{
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	if (pipe->nr_cfgs == 0) {
		pipe->cur_config_idx = 0;
		return 0;
	}

	if (skl_tplg_is_multi_fmt(skl, pipe)) {
		pipe->cur_config_idx = pipe->pipe_config_idx;
		pipe->memory_pages = pconfig->mem_pages;
		dev_dbg(skl->dev, "found pipe config idx:%d\n",
			pipe->cur_config_idx);
		return 0;
	}

	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE || pipe->nr_cfgs == 1) {
		dev_dbg(skl->dev, "No conn_type or just 1 pathcfg, taking 0th for %d\n",
			pipe->ppl_id);
		pipe->cur_config_idx = 0;
		pipe->memory_pages = pconfig->mem_pages;

		return 0;
	}

	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
	     (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			pipe->cur_config_idx = i;
			pipe->memory_pages = pconfig->mem_pages;
			dev_dbg(skl->dev, "Using pipe config: %d\n", i);

			return 0;
		}
	}

	dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}

/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 * - Create the pipeline
 * - Initialize the modules in the pipeline
 * - Finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(skl, mconfig->pipe);
	if (ret < 0)
		return ret;

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(skl, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			if (modules->dst == module)
				skl_bind_modules(skl, modules->src,
							modules->dst);
		}
	}

	return 0;
}

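/*
 * For a KPB module, the bind-time parameter blob carries a list of
 * (module id, instance id) pairs; rewrite each topology instance id with
 * the driver's private instance id before sending it to the firmware.
 */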
static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->u.map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}

/*
 * Some modules require params to be set after the module is bound to
 * all of its connected pins.
 *
 * The module provider initializes the set_param flag for such modules,
 * and we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * check all out/in pins are in bind state.
	 * if so set the module param
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config[SKL_PARAM_BIND].caps_size > 0 &&
	    mconfig->formats_config[SKL_PARAM_BIND].set_params ==
								SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config[SKL_PARAM_BIND];
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				skl_fill_sink_instance_id(skl, params, bc->max,
								mconfig);

				ret = skl_set_module_params(skl, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
{
	struct uuid_module *module;

	list_for_each_entry(module, &skl->uuid_list, list) {
		if (guid_equal(uuid, &module->uuid))
			return module->id;
	}

	return -EINVAL;
}

static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
					const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = skl_to_bus(skl);
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		size = struct_size(params, u.map, uuid_params->num_modules);

		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}

/*
 * Retrieve the module id from the UUID mentioned in the
 * post bind params
 */
void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
				struct snd_soc_dapm_widget *w)
{
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	/*
	 * Post bind params are used only for KPB,
	 * to set the copier instances used to drain
	 * the data in fast mode
	 */
	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
		return;

	for (i = 0; i < w->num_kcontrols; i++)
		if ((w->kcontrol_news[i].access &
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
			(skl_tplg_find_moduleid_from_uuid(skl,
			&w->kcontrol_news[i]) < 0))
			dev_err(skl->dev,
				"%s: invalid kpb post bind params\n",
				__func__);
}

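/*
 * Queue a src -> dst pair on skl->bind_list so the bind can be performed
 * later, once the destination module has been initialized; pairs already
 * present in the list are not added again.
 */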
static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
	struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for module with static pin connection */
	for (i = 0; i < dst->module->max_input_pins; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id == src->id.module_id) &&
			(pin->id.instance_id == src->id.instance_id)) {

			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}

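/*
 * Walk the sink paths of widget w recursively: for every connected SKL
 * sink widget, bind src_mconfig to it (or defer the bind if the sink is
 * not yet initialized) and start the sink pipe if it is not an FE pipe.
 */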
static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl_dev *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(skl->dev,
			"%s: src widget=%s\n", __func__, w->name);
		dev_dbg(skl->dev,
			"%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink, skl->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check widgets in the sink pipelines. They can be
		 * of any widget type and we are only interested in the ones
		 * used for SKL, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink, skl->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. a reference path.
			 * When the path is enabled, the dst module that needs
			 * to be bound may not be initialized yet. If it is
			 * not initialized, add the pair to the deferred bind
			 * list; when the dst module is later initialized, it
			 * is bound to the module from the deferred list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;
			}

			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w,
					src_mconfig, skl);
			skl_tplg_set_module_bind_params(sink,
					sink_mconfig, skl);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(skl,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of a PGA
 * we need to do the following:
 * - Bind to the sink pipeline
 *   Since the sink pipes can be running and we don't get a mixer event on
 *   connect for an already running mixer, we need to find the sink pipes
 *   here and bind to them. This way dynamic connect works.
 * - Start the sink pipeline, if not running
 * - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
								struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(skl, src_mconfig->pipe);

	return 0;
}

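/*
 * Walk the source paths of widget w recursively and return the first
 * connected widget that belongs to the SKL DSP, or NULL if none is found.
 */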
static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl_dev *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(skl->dev, "sink widget=%s\n", w->name);
		dev_dbg(skl->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check widgets in the source pipelines. They can be
		 * of any widget type and we are only interested in the ones
		 * used for SKL, so check that first.
		 */
		if ((p->source->priv != NULL) &&
				is_skl_dsp_widget_type(p->source, skl->dev)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * In the Post-PMU event of the mixer we need to do the following:
 * - Check if this pipe is running
 * - If not, then:
 *   - bind this pipeline to its source pipeline; if the source pipe is
 *     already running, this means it is a dynamic connection and we need
 *     to bind only to that pipe
 *   - start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If source pipe is already started, that means source is driving
	 * one more sink before this sink got connected. Since source is
	 * started, bind this sink to source and start this pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * If the source pipe is not started, there is no need to
		 * bind or start this pipe.
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, skl);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(skl, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 * - Stop the pipe
 * - Find the source connections and remove them from the dapm_path_list
 * - Unbind from source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(skl, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(skl,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * In the Post-PMD event of the mixer we need to do the following:
 * - Unbind the modules within the pipeline
 * - Delete the pipeline (modules do not need to be explicitly deleted;
 *   deleting the pipeline is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, unbind the
			 * modules from the deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(skl, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(skl, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(skl, mconfig->pipe);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(skl, s_pipe);
}

/*
 * In the Post-PMD event of a PGA we need to do the following:
 * - Stop the pipeline
 * - If a source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
								struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(skl, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector; if a path is found it means the
			 * unbind between source and sink has not happened yet.
			 */
			ret = skl_unbind_modules(skl, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * We are only interested in the last PGA (leaf PGA) in a pipeline, to
 * disconnect from the sink while it is running (two FEs to one BE, or one
 * FE to two BEs scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

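/*
 * Common get/set handler for the multi-config enum controls: the control's
 * private data holds a pipe id, which is looked up in skl->ppl_list and
 * used to read or update that pipe's pipe_config_idx.
 */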
  1143. static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
  1144. struct snd_ctl_elem_value *ucontrol,
  1145. bool is_set)
  1146. {
  1147. struct snd_soc_component *component =
  1148. snd_soc_kcontrol_component(kcontrol);
  1149. struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
  1150. struct skl_dev *skl = bus_to_skl(bus);
  1151. struct skl_pipeline *ppl;
  1152. struct skl_pipe *pipe = NULL;
  1153. struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
  1154. u32 *pipe_id;
  1155. if (!ec)
  1156. return -EINVAL;
  1157. if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
  1158. return -EINVAL;
  1159. pipe_id = ec->dobj.private;
  1160. list_for_each_entry(ppl, &skl->ppl_list, node) {
  1161. if (ppl->pipe->ppl_id == *pipe_id) {
  1162. pipe = ppl->pipe;
  1163. break;
  1164. }
  1165. }
  1166. if (!pipe)
  1167. return -EIO;
  1168. if (is_set)
  1169. pipe->pipe_config_idx = ucontrol->value.enumerated.item[0];
  1170. else
  1171. ucontrol->value.enumerated.item[0] = pipe->pipe_config_idx;
  1172. return 0;
  1173. }
  1174. static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
  1175. struct snd_ctl_elem_value *ucontrol)
  1176. {
  1177. return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
  1178. }
  1179. static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
  1180. struct snd_ctl_elem_value *ucontrol)
  1181. {
  1182. return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
  1183. }
  1184. static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
  1185. struct snd_ctl_elem_value *ucontrol)
  1186. {
  1187. return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
  1188. }
  1189. static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
  1190. struct snd_ctl_elem_value *ucontrol)
  1191. {
  1192. return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
  1193. }
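/*
 * The byte TLV controls below exchange data with user space as a small
 * header followed by the payload, roughly (sketch, not a formal ABI
 * description):
 *
 *	u32 param_id;
 *	u32 size;		payload size, capped to the topology max
 *	u8  payload[size];
 *
 * which is why the get handler strips 2 * sizeof(u32) from the size it is
 * handed before copying the payload out.
 */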
  1194. static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
  1195. unsigned int __user *data, unsigned int size)
  1196. {
  1197. struct soc_bytes_ext *sb =
  1198. (struct soc_bytes_ext *)kcontrol->private_value;
  1199. struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
  1200. struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
  1201. struct skl_module_cfg *mconfig = w->priv;
  1202. struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
  1203. if (w->power)
  1204. skl_get_module_params(skl, (u32 *)bc->params,
  1205. bc->size, bc->param_id, mconfig);
  1206. /* decrement size for TLV header */
  1207. size -= 2 * sizeof(u32);
  1208. /* check size as we don't want to send kernel data */
  1209. if (size > bc->max)
  1210. size = bc->max;
  1211. if (bc->params) {
  1212. if (copy_to_user(data, &bc->param_id, sizeof(u32)))
  1213. return -EFAULT;
  1214. if (copy_to_user(data + 1, &size, sizeof(u32)))
  1215. return -EFAULT;
  1216. if (copy_to_user(data + 2, bc->params, size))
  1217. return -EFAULT;
  1218. }
  1219. return 0;
  1220. }
  1221. #define SKL_PARAM_VENDOR_ID 0xff
  1222. static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
  1223. const unsigned int __user *data, unsigned int size)
  1224. {
  1225. struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
  1226. struct skl_module_cfg *mconfig = w->priv;
  1227. struct soc_bytes_ext *sb =
  1228. (struct soc_bytes_ext *)kcontrol->private_value;
  1229. struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
  1230. struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
  1231. if (ac->params) {
  1232. if (size > ac->max)
  1233. return -EINVAL;
  1234. ac->size = size;
  1235. if (copy_from_user(ac->params, data, size))
  1236. return -EFAULT;
  1237. if (w->power)
  1238. return skl_set_module_params(skl,
  1239. (u32 *)ac->params, ac->size,
  1240. ac->param_id, mconfig);
  1241. }
  1242. return 0;
  1243. }
  1244. static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
  1245. struct snd_ctl_elem_value *ucontrol)
  1246. {
  1247. struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
  1248. struct skl_module_cfg *mconfig = w->priv;
  1249. struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
  1250. u32 ch_type = *((u32 *)ec->dobj.private);
  1251. if (mconfig->dmic_ch_type == ch_type)
  1252. ucontrol->value.enumerated.item[0] =
  1253. mconfig->dmic_ch_combo_index;
  1254. else
  1255. ucontrol->value.enumerated.item[0] = 0;
  1256. return 0;
  1257. }
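/*
 * The mic select module is programmed with an out_ch x in_ch gain matrix
 * (mic_cfg.blob below): for each output channel, the selected input mic
 * gets SKL_DEFAULT_MIC_SEL_GAIN and all other entries stay zero. A
 * hypothetical stereo pick of mics 0 and 2 would therefore set
 * blob[0][0] and blob[1][2] (illustrative example only).
 */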
  1258. static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
  1259. struct skl_mic_sel_config *mic_cfg, struct device *dev)
  1260. {
  1261. struct skl_specific_cfg *sp_cfg =
  1262. &mconfig->formats_config[SKL_PARAM_INIT];
  1263. sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
  1264. sp_cfg->set_params = SKL_PARAM_SET;
  1265. sp_cfg->param_id = 0x00;
  1266. if (!sp_cfg->caps) {
  1267. sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
  1268. if (!sp_cfg->caps)
  1269. return -ENOMEM;
  1270. }
  1271. mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
  1272. mic_cfg->flags = 0;
  1273. memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
  1274. return 0;
  1275. }
  1276. static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
  1277. struct snd_ctl_elem_value *ucontrol)
  1278. {
  1279. struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
  1280. struct skl_module_cfg *mconfig = w->priv;
  1281. struct skl_mic_sel_config mic_cfg = {0};
  1282. struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
  1283. u32 ch_type = *((u32 *)ec->dobj.private);
  1284. const int *list;
  1285. u8 in_ch, out_ch, index;
  1286. mconfig->dmic_ch_type = ch_type;
  1287. mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
  1288. /* enum control index 0 is INVALID, so no channels to be set */
  1289. if (mconfig->dmic_ch_combo_index == 0)
  1290. return 0;
  1291. /* No valid channel selection map for index 0, so offset by 1 */
  1292. index = mconfig->dmic_ch_combo_index - 1;
  1293. switch (ch_type) {
  1294. case SKL_CH_MONO:
  1295. if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
  1296. return -EINVAL;
  1297. list = &mic_mono_list[index];
  1298. break;
  1299. case SKL_CH_STEREO:
  1300. if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
  1301. return -EINVAL;
  1302. list = mic_stereo_list[index];
  1303. break;
  1304. case SKL_CH_TRIO:
  1305. if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
  1306. return -EINVAL;
  1307. list = mic_trio_list[index];
  1308. break;
  1309. case SKL_CH_QUATRO:
  1310. if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
  1311. return -EINVAL;
  1312. list = mic_quatro_list[index];
  1313. break;
  1314. default:
  1315. dev_err(w->dapm->dev,
  1316. "Invalid channel %d for mic_select module\n",
  1317. ch_type);
  1318. return -EINVAL;
  1319. }
	/* channel type enum maps to the number of channels for that type */
  1321. for (out_ch = 0; out_ch < ch_type; out_ch++) {
  1322. in_ch = list[out_ch];
  1323. mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
  1324. }
  1325. return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
  1326. }
/*
 * Fill the DMA id for host and link. In case of a passthrough
 * pipeline, both host and link are in the same pipeline, so the
 * link and host params need to be copied based on dev_type.
 */
  1332. static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
  1333. struct skl_pipe_params *params)
  1334. {
  1335. struct skl_pipe *pipe = mcfg->pipe;
  1336. if (pipe->passthru) {
  1337. switch (mcfg->dev_type) {
  1338. case SKL_DEVICE_HDALINK:
  1339. pipe->p_params->link_dma_id = params->link_dma_id;
  1340. pipe->p_params->link_index = params->link_index;
  1341. pipe->p_params->link_bps = params->link_bps;
  1342. break;
  1343. case SKL_DEVICE_HDAHOST:
  1344. pipe->p_params->host_dma_id = params->host_dma_id;
  1345. pipe->p_params->host_bps = params->host_bps;
  1346. break;
  1347. default:
  1348. break;
  1349. }
  1350. pipe->p_params->s_fmt = params->s_fmt;
  1351. pipe->p_params->ch = params->ch;
  1352. pipe->p_params->s_freq = params->s_freq;
  1353. pipe->p_params->stream = params->stream;
  1354. pipe->p_params->format = params->format;
  1355. } else {
  1356. memcpy(pipe->p_params, params, sizeof(*params));
  1357. }
  1358. }
/*
 * The FE params are passed by hw_params of the DAI.
 * On hw_params, the params are stored in the gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
  1365. int skl_tplg_update_pipe_params(struct device *dev,
  1366. struct skl_module_cfg *mconfig,
  1367. struct skl_pipe_params *params)
  1368. {
  1369. struct skl_module_res *res;
  1370. struct skl_dev *skl = get_skl_ctx(dev);
  1371. struct skl_module_fmt *format = NULL;
  1372. u8 cfg_idx = mconfig->pipe->cur_config_idx;
  1373. res = &mconfig->module->resources[mconfig->res_idx];
  1374. skl_tplg_fill_dma_id(mconfig, params);
  1375. mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
  1376. mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
  1377. if (skl->nr_modules)
  1378. return 0;
  1379. if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
  1380. format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
  1381. else
  1382. format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;
  1383. /* set the hw_params */
  1384. format->s_freq = params->s_freq;
  1385. format->channels = params->ch;
  1386. format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
	/*
	 * 16 bit samples use a 16 bit container, whereas 24 bit samples are
	 * carried in a 32 bit container, so update the container bit depth
	 * accordingly.
	 */
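	/*
	 * Illustrative mapping for common ALSA formats: S16_LE carries 16
	 * valid bits in a 16 bit container, while S24_LE/S32_LE carry 24/32
	 * valid bits in a 32 bit container.
	 */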
  1391. switch (format->valid_bit_depth) {
  1392. case SKL_DEPTH_16BIT:
  1393. format->bit_depth = format->valid_bit_depth;
  1394. break;
  1395. case SKL_DEPTH_24BIT:
  1396. case SKL_DEPTH_32BIT:
  1397. format->bit_depth = SKL_DEPTH_32BIT;
  1398. break;
  1399. default:
  1400. dev_err(dev, "Invalid bit depth %x for pipe\n",
  1401. format->valid_bit_depth);
  1402. return -EINVAL;
  1403. }
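	/*
	 * IBS/OBS below are sized for a 1 ms chunk. As an illustrative
	 * example (values not taken from any topology): 48 kHz, 2 channels,
	 * 16 bit container gives (48000 / 1000) * 2 * (16 >> 3) = 192 bytes.
	 */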
  1404. if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
  1405. res->ibs = (format->s_freq / 1000) *
  1406. (format->channels) *
  1407. (format->bit_depth >> 3);
  1408. } else {
  1409. res->obs = (format->s_freq / 1000) *
  1410. (format->channels) *
  1411. (format->bit_depth >> 3);
  1412. }
  1413. return 0;
  1414. }
/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply them to
 * the FE pipeline.
 */
  1420. struct skl_module_cfg *
  1421. skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
  1422. {
  1423. struct snd_soc_dapm_widget *w;
  1424. struct snd_soc_dapm_path *p = NULL;
  1425. if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
  1426. w = dai->playback_widget;
  1427. snd_soc_dapm_widget_for_each_sink_path(w, p) {
  1428. if (p->connect && p->sink->power &&
  1429. !is_skl_dsp_widget_type(p->sink, dai->dev))
  1430. continue;
  1431. if (p->sink->priv) {
  1432. dev_dbg(dai->dev, "set params for %s\n",
  1433. p->sink->name);
  1434. return p->sink->priv;
  1435. }
  1436. }
  1437. } else {
  1438. w = dai->capture_widget;
  1439. snd_soc_dapm_widget_for_each_source_path(w, p) {
  1440. if (p->connect && p->source->power &&
  1441. !is_skl_dsp_widget_type(p->source, dai->dev))
  1442. continue;
  1443. if (p->source->priv) {
  1444. dev_dbg(dai->dev, "set params for %s\n",
  1445. p->source->name);
  1446. return p->source->priv;
  1447. }
  1448. }
  1449. }
  1450. return NULL;
  1451. }
  1452. static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
  1453. struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
  1454. {
  1455. struct snd_soc_dapm_path *p;
  1456. struct skl_module_cfg *mconfig = NULL;
  1457. snd_soc_dapm_widget_for_each_source_path(w, p) {
  1458. if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
  1459. if (p->connect &&
  1460. (p->sink->id == snd_soc_dapm_aif_out) &&
  1461. p->source->priv) {
  1462. mconfig = p->source->priv;
  1463. return mconfig;
  1464. }
  1465. mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
  1466. if (mconfig)
  1467. return mconfig;
  1468. }
  1469. }
  1470. return mconfig;
  1471. }
  1472. static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
  1473. struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
  1474. {
  1475. struct snd_soc_dapm_path *p;
  1476. struct skl_module_cfg *mconfig = NULL;
  1477. snd_soc_dapm_widget_for_each_sink_path(w, p) {
  1478. if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
  1479. if (p->connect &&
  1480. (p->source->id == snd_soc_dapm_aif_in) &&
  1481. p->sink->priv) {
  1482. mconfig = p->sink->priv;
  1483. return mconfig;
  1484. }
  1485. mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
  1486. if (mconfig)
  1487. return mconfig;
  1488. }
  1489. }
  1490. return mconfig;
  1491. }
  1492. struct skl_module_cfg *
  1493. skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
  1494. {
  1495. struct snd_soc_dapm_widget *w;
  1496. struct skl_module_cfg *mconfig;
  1497. if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
  1498. w = dai->playback_widget;
  1499. mconfig = skl_get_mconfig_pb_cpr(dai, w);
  1500. } else {
  1501. w = dai->capture_widget;
  1502. mconfig = skl_get_mconfig_cap_cpr(dai, w);
  1503. }
  1504. return mconfig;
  1505. }
  1506. static u8 skl_tplg_be_link_type(int dev_type)
  1507. {
  1508. int ret;
  1509. switch (dev_type) {
  1510. case SKL_DEVICE_BT:
  1511. ret = NHLT_LINK_SSP;
  1512. break;
  1513. case SKL_DEVICE_DMIC:
  1514. ret = NHLT_LINK_DMIC;
  1515. break;
  1516. case SKL_DEVICE_I2S:
  1517. ret = NHLT_LINK_SSP;
  1518. break;
  1519. case SKL_DEVICE_HDALINK:
  1520. ret = NHLT_LINK_HDA;
  1521. break;
  1522. default:
  1523. ret = NHLT_LINK_INVALID;
  1524. break;
  1525. }
  1526. return ret;
  1527. }
/*
 * Fill the BE gateway parameters.
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT table, so query the blob for the interface type (i2s/pdm) and
 * instance. The port can have multiple settings, so pick one based on the
 * pipeline parameters.
 */
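/*
 * The NHLT endpoint blob is looked up by virtual bus id, link type, bit
 * depth/container, channel count, rate, direction and device type, all of
 * which come from the selected pipe config and the hw_params.
 */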
  1535. static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
  1536. struct skl_module_cfg *mconfig,
  1537. struct skl_pipe_params *params)
  1538. {
  1539. struct nhlt_specific_cfg *cfg;
  1540. struct skl_pipe *pipe = mconfig->pipe;
  1541. struct skl_pipe_fmt *pipe_fmt;
  1542. struct skl_dev *skl = get_skl_ctx(dai->dev);
  1543. int link_type = skl_tplg_be_link_type(mconfig->dev_type);
  1544. u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
  1545. skl_tplg_fill_dma_id(mconfig, params);
  1546. if (link_type == NHLT_LINK_HDA)
  1547. return 0;
  1548. if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK)
  1549. pipe_fmt = &pipe->configs[pipe->pipe_config_idx].out_fmt;
  1550. else
  1551. pipe_fmt = &pipe->configs[pipe->pipe_config_idx].in_fmt;
	/* update the blob based on virtual bus_id */
  1553. cfg = intel_nhlt_get_endpoint_blob(dai->dev, skl->nhlt,
  1554. mconfig->vbus_id, link_type,
  1555. pipe_fmt->bps, params->s_cont,
  1556. pipe_fmt->channels, pipe_fmt->freq,
  1557. pipe->direction, dev_type);
  1558. if (cfg) {
  1559. mconfig->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
  1560. mconfig->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
  1561. } else {
  1562. dev_err(dai->dev, "Blob NULL for id:%d type:%d dirn:%d ch:%d, freq:%d, fmt:%d\n",
  1563. mconfig->vbus_id, link_type, params->stream,
  1564. params->ch, params->s_freq, params->s_fmt);
  1565. return -EINVAL;
  1566. }
  1567. return 0;
  1568. }
  1569. static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
  1570. struct snd_soc_dapm_widget *w,
  1571. struct skl_pipe_params *params)
  1572. {
  1573. struct snd_soc_dapm_path *p;
  1574. int ret = -EIO;
  1575. snd_soc_dapm_widget_for_each_source_path(w, p) {
  1576. if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
  1577. p->source->priv) {
  1578. ret = skl_tplg_be_fill_pipe_params(dai,
  1579. p->source->priv, params);
  1580. if (ret < 0)
  1581. return ret;
  1582. } else {
  1583. ret = skl_tplg_be_set_src_pipe_params(dai,
  1584. p->source, params);
  1585. if (ret < 0)
  1586. return ret;
  1587. }
  1588. }
  1589. return ret;
  1590. }
  1591. static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
  1592. struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
  1593. {
  1594. struct snd_soc_dapm_path *p;
  1595. int ret = -EIO;
  1596. snd_soc_dapm_widget_for_each_sink_path(w, p) {
  1597. if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
  1598. p->sink->priv) {
  1599. ret = skl_tplg_be_fill_pipe_params(dai,
  1600. p->sink->priv, params);
  1601. if (ret < 0)
  1602. return ret;
  1603. } else {
  1604. ret = skl_tplg_be_set_sink_pipe_params(
  1605. dai, p->sink, params);
  1606. if (ret < 0)
  1607. return ret;
  1608. }
  1609. }
  1610. return ret;
  1611. }
/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on the direction we need to find either the source
 * list or the sink list and set the pipeline parameters accordingly.
 */
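/*
 * Concretely, that means walking the source paths of the BE playback
 * widget for playback and the sink paths of the BE capture widget for
 * capture, as the helper below does.
 */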
  1617. int skl_tplg_be_update_params(struct snd_soc_dai *dai,
  1618. struct skl_pipe_params *params)
  1619. {
  1620. struct snd_soc_dapm_widget *w;
  1621. if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
  1622. w = dai->playback_widget;
  1623. return skl_tplg_be_set_src_pipe_params(dai, w, params);
  1624. } else {
  1625. w = dai->capture_widget;
  1626. return skl_tplg_be_set_sink_pipe_params(dai, w, params);
  1627. }
  1628. return 0;
  1629. }
  1630. static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
  1631. {SKL_MIXER_EVENT, skl_tplg_mixer_event},
  1632. {SKL_VMIXER_EVENT, skl_tplg_mixer_event},
  1633. {SKL_PGA_EVENT, skl_tplg_pga_event},
  1634. };
  1635. static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
  1636. {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
  1637. skl_tplg_tlv_control_set},
  1638. };
  1639. static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
  1640. {
  1641. .id = SKL_CONTROL_TYPE_MIC_SELECT,
  1642. .get = skl_tplg_mic_control_get,
  1643. .put = skl_tplg_mic_control_set,
  1644. },
  1645. {
  1646. .id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
  1647. .get = skl_tplg_multi_config_get,
  1648. .put = skl_tplg_multi_config_set,
  1649. },
  1650. {
  1651. .id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
  1652. .get = skl_tplg_multi_config_get_dmic,
  1653. .put = skl_tplg_multi_config_set_dmic,
  1654. }
  1655. };
  1656. static int skl_tplg_fill_pipe_cfg(struct device *dev,
  1657. struct skl_pipe *pipe, u32 tkn,
  1658. u32 tkn_val, int conf_idx, int dir)
  1659. {
  1660. struct skl_pipe_fmt *fmt;
  1661. struct skl_path_config *config;
  1662. switch (dir) {
  1663. case SKL_DIR_IN:
  1664. fmt = &pipe->configs[conf_idx].in_fmt;
  1665. break;
  1666. case SKL_DIR_OUT:
  1667. fmt = &pipe->configs[conf_idx].out_fmt;
  1668. break;
  1669. default:
  1670. dev_err(dev, "Invalid direction: %d\n", dir);
  1671. return -EINVAL;
  1672. }
  1673. config = &pipe->configs[conf_idx];
  1674. switch (tkn) {
  1675. case SKL_TKN_U32_CFG_FREQ:
  1676. fmt->freq = tkn_val;
  1677. break;
  1678. case SKL_TKN_U8_CFG_CHAN:
  1679. fmt->channels = tkn_val;
  1680. break;
  1681. case SKL_TKN_U8_CFG_BPS:
  1682. fmt->bps = tkn_val;
  1683. break;
  1684. case SKL_TKN_U32_PATH_MEM_PGS:
  1685. config->mem_pages = tkn_val;
  1686. break;
  1687. default:
  1688. dev_err(dev, "Invalid token config: %d\n", tkn);
  1689. return -EINVAL;
  1690. }
  1691. return 0;
  1692. }
  1693. static int skl_tplg_fill_pipe_tkn(struct device *dev,
  1694. struct skl_pipe *pipe, u32 tkn,
  1695. u32 tkn_val)
  1696. {
  1697. switch (tkn) {
  1698. case SKL_TKN_U32_PIPE_CONN_TYPE:
  1699. pipe->conn_type = tkn_val;
  1700. break;
  1701. case SKL_TKN_U32_PIPE_PRIORITY:
  1702. pipe->pipe_priority = tkn_val;
  1703. break;
  1704. case SKL_TKN_U32_PIPE_MEM_PGS:
  1705. pipe->memory_pages = tkn_val;
  1706. break;
  1707. case SKL_TKN_U32_PMODE:
  1708. pipe->lp_mode = tkn_val;
  1709. break;
  1710. case SKL_TKN_U32_PIPE_DIRECTION:
  1711. pipe->direction = tkn_val;
  1712. break;
  1713. case SKL_TKN_U32_NUM_CONFIGS:
  1714. pipe->nr_cfgs = tkn_val;
  1715. break;
  1716. default:
  1717. dev_err(dev, "Token not handled %d\n", tkn);
  1718. return -EINVAL;
  1719. }
  1720. return 0;
  1721. }
  1722. /*
  1723. * Add pipeline by parsing the relevant tokens
  1724. * Return an existing pipe if the pipe already exists.
  1725. */
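/*
 * Returning -EEXIST here is not a hard error: the token parser treats it
 * as "pipe already parsed" and only records that fact (see the
 * SKL_TKN_U32_PIPE_ID handling in skl_tplg_get_token()).
 */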
  1726. static int skl_tplg_add_pipe(struct device *dev,
  1727. struct skl_module_cfg *mconfig, struct skl_dev *skl,
  1728. struct snd_soc_tplg_vendor_value_elem *tkn_elem)
  1729. {
  1730. struct skl_pipeline *ppl;
  1731. struct skl_pipe *pipe;
  1732. struct skl_pipe_params *params;
  1733. list_for_each_entry(ppl, &skl->ppl_list, node) {
  1734. if (ppl->pipe->ppl_id == tkn_elem->value) {
  1735. mconfig->pipe = ppl->pipe;
  1736. return -EEXIST;
  1737. }
  1738. }
  1739. ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
  1740. if (!ppl)
  1741. return -ENOMEM;
  1742. pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
  1743. if (!pipe)
  1744. return -ENOMEM;
  1745. params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
  1746. if (!params)
  1747. return -ENOMEM;
  1748. pipe->p_params = params;
  1749. pipe->ppl_id = tkn_elem->value;
  1750. INIT_LIST_HEAD(&pipe->w_list);
  1751. ppl->pipe = pipe;
  1752. list_add(&ppl->node, &skl->ppl_list);
  1753. mconfig->pipe = pipe;
  1754. mconfig->pipe->state = SKL_PIPE_INVALID;
  1755. return 0;
  1756. }
  1757. static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
  1758. struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
  1759. {
  1760. if (uuid_tkn->token == SKL_TKN_UUID) {
  1761. import_guid(guid, uuid_tkn->uuid);
  1762. return 0;
  1763. }
  1764. dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);
  1765. return -EINVAL;
  1766. }
  1767. static int skl_tplg_fill_pin(struct device *dev,
  1768. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  1769. struct skl_module_pin *m_pin,
  1770. int pin_index)
  1771. {
  1772. int ret;
  1773. switch (tkn_elem->token) {
  1774. case SKL_TKN_U32_PIN_MOD_ID:
  1775. m_pin[pin_index].id.module_id = tkn_elem->value;
  1776. break;
  1777. case SKL_TKN_U32_PIN_INST_ID:
  1778. m_pin[pin_index].id.instance_id = tkn_elem->value;
  1779. break;
  1780. case SKL_TKN_UUID:
  1781. ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid,
  1782. (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
  1783. if (ret < 0)
  1784. return ret;
  1785. break;
  1786. default:
  1787. dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
  1788. return -EINVAL;
  1789. }
  1790. return 0;
  1791. }
  1792. /*
  1793. * Parse for pin config specific tokens to fill up the
  1794. * module private data
  1795. */
  1796. static int skl_tplg_fill_pins_info(struct device *dev,
  1797. struct skl_module_cfg *mconfig,
  1798. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  1799. int dir, int pin_count)
  1800. {
  1801. int ret;
  1802. struct skl_module_pin *m_pin;
  1803. switch (dir) {
  1804. case SKL_DIR_IN:
  1805. m_pin = mconfig->m_in_pin;
  1806. break;
  1807. case SKL_DIR_OUT:
  1808. m_pin = mconfig->m_out_pin;
  1809. break;
  1810. default:
  1811. dev_err(dev, "Invalid direction value\n");
  1812. return -EINVAL;
  1813. }
  1814. ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
  1815. if (ret < 0)
  1816. return ret;
  1817. m_pin[pin_count].in_use = false;
  1818. m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
  1819. return 0;
  1820. }
  1821. /*
  1822. * Fill up input/output module config format based
  1823. * on the direction
  1824. */
  1825. static int skl_tplg_fill_fmt(struct device *dev,
  1826. struct skl_module_fmt *dst_fmt,
  1827. u32 tkn, u32 value)
  1828. {
  1829. switch (tkn) {
  1830. case SKL_TKN_U32_FMT_CH:
  1831. dst_fmt->channels = value;
  1832. break;
  1833. case SKL_TKN_U32_FMT_FREQ:
  1834. dst_fmt->s_freq = value;
  1835. break;
  1836. case SKL_TKN_U32_FMT_BIT_DEPTH:
  1837. dst_fmt->bit_depth = value;
  1838. break;
  1839. case SKL_TKN_U32_FMT_SAMPLE_SIZE:
  1840. dst_fmt->valid_bit_depth = value;
  1841. break;
  1842. case SKL_TKN_U32_FMT_CH_CONFIG:
  1843. dst_fmt->ch_cfg = value;
  1844. break;
  1845. case SKL_TKN_U32_FMT_INTERLEAVE:
  1846. dst_fmt->interleaving_style = value;
  1847. break;
  1848. case SKL_TKN_U32_FMT_SAMPLE_TYPE:
  1849. dst_fmt->sample_type = value;
  1850. break;
  1851. case SKL_TKN_U32_FMT_CH_MAP:
  1852. dst_fmt->ch_map = value;
  1853. break;
  1854. default:
  1855. dev_err(dev, "Invalid token %d\n", tkn);
  1856. return -EINVAL;
  1857. }
  1858. return 0;
  1859. }
  1860. static int skl_tplg_widget_fill_fmt(struct device *dev,
  1861. struct skl_module_iface *fmt,
  1862. u32 tkn, u32 val, u32 dir, int fmt_idx)
  1863. {
  1864. struct skl_module_fmt *dst_fmt;
  1865. if (!fmt)
  1866. return -EINVAL;
  1867. switch (dir) {
  1868. case SKL_DIR_IN:
  1869. dst_fmt = &fmt->inputs[fmt_idx].fmt;
  1870. break;
  1871. case SKL_DIR_OUT:
  1872. dst_fmt = &fmt->outputs[fmt_idx].fmt;
  1873. break;
  1874. default:
  1875. dev_err(dev, "Invalid direction: %d\n", dir);
  1876. return -EINVAL;
  1877. }
  1878. return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
  1879. }
  1880. static void skl_tplg_fill_pin_dynamic_val(
  1881. struct skl_module_pin *mpin, u32 pin_count, u32 value)
  1882. {
  1883. int i;
  1884. for (i = 0; i < pin_count; i++)
  1885. mpin[i].is_dynamic = value;
  1886. }
/*
 * The resource table in the manifest has pin specific resources
 * like pin id and pin buffer size.
 */
  1891. static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
  1892. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  1893. struct skl_module_res *res, int pin_idx, int dir)
  1894. {
  1895. struct skl_module_pin_resources *m_pin;
  1896. switch (dir) {
  1897. case SKL_DIR_IN:
  1898. m_pin = &res->input[pin_idx];
  1899. break;
  1900. case SKL_DIR_OUT:
  1901. m_pin = &res->output[pin_idx];
  1902. break;
  1903. default:
  1904. dev_err(dev, "Invalid pin direction: %d\n", dir);
  1905. return -EINVAL;
  1906. }
  1907. switch (tkn_elem->token) {
  1908. case SKL_TKN_MM_U32_RES_PIN_ID:
  1909. m_pin->pin_index = tkn_elem->value;
  1910. break;
  1911. case SKL_TKN_MM_U32_PIN_BUF:
  1912. m_pin->buf_size = tkn_elem->value;
  1913. break;
  1914. default:
  1915. dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
  1916. return -EINVAL;
  1917. }
  1918. return 0;
  1919. }
  1920. /*
  1921. * Fill module specific resources from the manifest's resource
  1922. * table like CPS, DMA size, mem_pages.
  1923. */
  1924. static int skl_tplg_fill_res_tkn(struct device *dev,
  1925. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  1926. struct skl_module_res *res,
  1927. int pin_idx, int dir)
  1928. {
  1929. int ret, tkn_count = 0;
  1930. if (!res)
  1931. return -EINVAL;
  1932. switch (tkn_elem->token) {
  1933. case SKL_TKN_MM_U32_DMA_SIZE:
  1934. res->dma_buffer_size = tkn_elem->value;
  1935. break;
  1936. case SKL_TKN_MM_U32_CPC:
  1937. res->cpc = tkn_elem->value;
  1938. break;
  1939. case SKL_TKN_U32_MEM_PAGES:
  1940. res->is_pages = tkn_elem->value;
  1941. break;
  1942. case SKL_TKN_U32_OBS:
  1943. res->obs = tkn_elem->value;
  1944. break;
  1945. case SKL_TKN_U32_IBS:
  1946. res->ibs = tkn_elem->value;
  1947. break;
  1948. case SKL_TKN_MM_U32_RES_PIN_ID:
  1949. case SKL_TKN_MM_U32_PIN_BUF:
  1950. ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
  1951. pin_idx, dir);
  1952. if (ret < 0)
  1953. return ret;
  1954. break;
  1955. case SKL_TKN_MM_U32_CPS:
  1956. case SKL_TKN_U32_MAX_MCPS:
  1957. /* ignore unused tokens */
  1958. break;
  1959. default:
  1960. dev_err(dev, "Not a res type token: %d", tkn_elem->token);
  1961. return -EINVAL;
  1962. }
  1963. tkn_count++;
  1964. return tkn_count;
  1965. }
  1966. /*
  1967. * Parse tokens to fill up the module private data
  1968. */
  1969. static int skl_tplg_get_token(struct device *dev,
  1970. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  1971. struct skl_dev *skl, struct skl_module_cfg *mconfig)
  1972. {
  1973. int tkn_count = 0;
  1974. int ret;
  1975. static int is_pipe_exists;
  1976. static int pin_index, dir, conf_idx;
  1977. struct skl_module_iface *iface = NULL;
  1978. struct skl_module_res *res = NULL;
  1979. int res_idx = mconfig->res_idx;
  1980. int fmt_idx = mconfig->fmt_idx;
	/*
	 * If the manifest structure contains no modules, fill all
	 * the module data at the 0th index.
	 * res_idx and fmt_idx default to 0.
	 */
  1986. if (skl->nr_modules == 0) {
  1987. res = &mconfig->module->resources[res_idx];
  1988. iface = &mconfig->module->formats[fmt_idx];
  1989. }
  1990. if (tkn_elem->token > SKL_TKN_MAX)
  1991. return -EINVAL;
  1992. switch (tkn_elem->token) {
  1993. case SKL_TKN_U8_IN_QUEUE_COUNT:
  1994. mconfig->module->max_input_pins = tkn_elem->value;
  1995. break;
  1996. case SKL_TKN_U8_OUT_QUEUE_COUNT:
  1997. mconfig->module->max_output_pins = tkn_elem->value;
  1998. break;
  1999. case SKL_TKN_U8_DYN_IN_PIN:
  2000. if (!mconfig->m_in_pin)
  2001. mconfig->m_in_pin =
  2002. devm_kcalloc(dev, MAX_IN_QUEUE,
  2003. sizeof(*mconfig->m_in_pin),
  2004. GFP_KERNEL);
  2005. if (!mconfig->m_in_pin)
  2006. return -ENOMEM;
  2007. skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
  2008. tkn_elem->value);
  2009. break;
	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			mconfig->m_out_pin =
				devm_kcalloc(dev, MAX_OUT_QUEUE,
					     sizeof(*mconfig->m_out_pin),
					     GFP_KERNEL);
		if (!mconfig->m_out_pin)
			return -ENOMEM;

		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
					      tkn_elem->value);
		break;
  2021. case SKL_TKN_U8_TIME_SLOT:
  2022. mconfig->time_slot = tkn_elem->value;
  2023. break;
  2024. case SKL_TKN_U8_CORE_ID:
  2025. mconfig->core_id = tkn_elem->value;
  2026. break;
  2027. case SKL_TKN_U8_MOD_TYPE:
  2028. mconfig->m_type = tkn_elem->value;
  2029. break;
  2030. case SKL_TKN_U8_DEV_TYPE:
  2031. mconfig->dev_type = tkn_elem->value;
  2032. break;
  2033. case SKL_TKN_U8_HW_CONN_TYPE:
  2034. mconfig->hw_conn_type = tkn_elem->value;
  2035. break;
  2036. case SKL_TKN_U16_MOD_INST_ID:
  2037. mconfig->id.instance_id =
  2038. tkn_elem->value;
  2039. break;
  2040. case SKL_TKN_U32_MEM_PAGES:
  2041. case SKL_TKN_U32_MAX_MCPS:
  2042. case SKL_TKN_U32_OBS:
  2043. case SKL_TKN_U32_IBS:
  2044. ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
  2045. if (ret < 0)
  2046. return ret;
  2047. break;
  2048. case SKL_TKN_U32_VBUS_ID:
  2049. mconfig->vbus_id = tkn_elem->value;
  2050. break;
  2051. case SKL_TKN_U32_PARAMS_FIXUP:
  2052. mconfig->params_fixup = tkn_elem->value;
  2053. break;
  2054. case SKL_TKN_U32_CONVERTER:
  2055. mconfig->converter = tkn_elem->value;
  2056. break;
  2057. case SKL_TKN_U32_D0I3_CAPS:
  2058. mconfig->d0i3_caps = tkn_elem->value;
  2059. break;
  2060. case SKL_TKN_U32_PIPE_ID:
  2061. ret = skl_tplg_add_pipe(dev,
  2062. mconfig, skl, tkn_elem);
  2063. if (ret < 0) {
  2064. if (ret == -EEXIST) {
  2065. is_pipe_exists = 1;
  2066. break;
  2067. }
  2068. return is_pipe_exists;
  2069. }
  2070. break;
  2071. case SKL_TKN_U32_PIPE_CONFIG_ID:
  2072. conf_idx = tkn_elem->value;
  2073. break;
  2074. case SKL_TKN_U32_PIPE_CONN_TYPE:
  2075. case SKL_TKN_U32_PIPE_PRIORITY:
  2076. case SKL_TKN_U32_PIPE_MEM_PGS:
  2077. case SKL_TKN_U32_PMODE:
  2078. case SKL_TKN_U32_PIPE_DIRECTION:
  2079. case SKL_TKN_U32_NUM_CONFIGS:
  2080. if (is_pipe_exists) {
  2081. ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
  2082. tkn_elem->token, tkn_elem->value);
  2083. if (ret < 0)
  2084. return ret;
  2085. }
  2086. break;
  2087. case SKL_TKN_U32_PATH_MEM_PGS:
  2088. case SKL_TKN_U32_CFG_FREQ:
  2089. case SKL_TKN_U8_CFG_CHAN:
  2090. case SKL_TKN_U8_CFG_BPS:
  2091. if (mconfig->pipe->nr_cfgs) {
  2092. ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
  2093. tkn_elem->token, tkn_elem->value,
  2094. conf_idx, dir);
  2095. if (ret < 0)
  2096. return ret;
  2097. }
  2098. break;
  2099. case SKL_TKN_CFG_MOD_RES_ID:
  2100. mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
  2101. break;
  2102. case SKL_TKN_CFG_MOD_FMT_ID:
  2103. mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
  2104. break;
	/*
	 * The SKL_TKN_U32_DIR_PIN_COUNT token carries both the direction
	 * and the pin count: the lower four bits hold the direction and
	 * the next four bits the pin count.
	 */
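	/*
	 * Illustrative decode (example value only): a token value of 0x30
	 * yields pin_index = (0x30 & SKL_PIN_COUNT_MASK) >> 4 = 3, with the
	 * direction taken from 0x30 & SKL_IN_DIR_BIT_MASK.
	 */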
  2110. case SKL_TKN_U32_DIR_PIN_COUNT:
  2111. dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
  2112. pin_index = (tkn_elem->value &
  2113. SKL_PIN_COUNT_MASK) >> 4;
  2114. break;
  2115. case SKL_TKN_U32_FMT_CH:
  2116. case SKL_TKN_U32_FMT_FREQ:
  2117. case SKL_TKN_U32_FMT_BIT_DEPTH:
  2118. case SKL_TKN_U32_FMT_SAMPLE_SIZE:
  2119. case SKL_TKN_U32_FMT_CH_CONFIG:
  2120. case SKL_TKN_U32_FMT_INTERLEAVE:
  2121. case SKL_TKN_U32_FMT_SAMPLE_TYPE:
  2122. case SKL_TKN_U32_FMT_CH_MAP:
  2123. ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
  2124. tkn_elem->value, dir, pin_index);
  2125. if (ret < 0)
  2126. return ret;
  2127. break;
  2128. case SKL_TKN_U32_PIN_MOD_ID:
  2129. case SKL_TKN_U32_PIN_INST_ID:
  2130. case SKL_TKN_UUID:
  2131. ret = skl_tplg_fill_pins_info(dev,
  2132. mconfig, tkn_elem, dir,
  2133. pin_index);
  2134. if (ret < 0)
  2135. return ret;
  2136. break;
  2137. case SKL_TKN_U32_FMT_CFG_IDX:
  2138. if (tkn_elem->value > SKL_MAX_PARAMS_TYPES)
  2139. return -EINVAL;
  2140. mconfig->fmt_cfg_idx = tkn_elem->value;
  2141. break;
  2142. case SKL_TKN_U32_CAPS_SIZE:
  2143. mconfig->formats_config[mconfig->fmt_cfg_idx].caps_size =
  2144. tkn_elem->value;
  2145. break;
  2146. case SKL_TKN_U32_CAPS_SET_PARAMS:
  2147. mconfig->formats_config[mconfig->fmt_cfg_idx].set_params =
  2148. tkn_elem->value;
  2149. break;
  2150. case SKL_TKN_U32_CAPS_PARAMS_ID:
  2151. mconfig->formats_config[mconfig->fmt_cfg_idx].param_id =
  2152. tkn_elem->value;
  2153. break;
  2154. case SKL_TKN_U32_PROC_DOMAIN:
  2155. mconfig->domain =
  2156. tkn_elem->value;
  2157. break;
  2158. case SKL_TKN_U32_DMA_BUF_SIZE:
  2159. mconfig->dma_buffer_size = tkn_elem->value;
  2160. break;
  2161. case SKL_TKN_U8_IN_PIN_TYPE:
  2162. case SKL_TKN_U8_OUT_PIN_TYPE:
  2163. case SKL_TKN_U8_CONN_TYPE:
  2164. break;
  2165. default:
  2166. dev_err(dev, "Token %d not handled\n",
  2167. tkn_elem->token);
  2168. return -EINVAL;
  2169. }
  2170. tkn_count++;
  2171. return tkn_count;
  2172. }
  2173. /*
  2174. * Parse the vendor array for specific tokens to construct
  2175. * module private data
  2176. */
  2177. static int skl_tplg_get_tokens(struct device *dev,
  2178. char *pvt_data, struct skl_dev *skl,
  2179. struct skl_module_cfg *mconfig, int block_size)
  2180. {
  2181. struct snd_soc_tplg_vendor_array *array;
  2182. struct snd_soc_tplg_vendor_value_elem *tkn_elem;
  2183. int tkn_count = 0, ret;
  2184. int off = 0, tuple_size = 0;
  2185. bool is_module_guid = true;
  2186. if (block_size <= 0)
  2187. return -EINVAL;
  2188. while (tuple_size < block_size) {
  2189. array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
  2190. off += array->size;
  2191. switch (array->type) {
  2192. case SND_SOC_TPLG_TUPLE_TYPE_STRING:
  2193. dev_warn(dev, "no string tokens expected for skl tplg\n");
  2194. continue;
  2195. case SND_SOC_TPLG_TUPLE_TYPE_UUID:
  2196. if (is_module_guid) {
  2197. ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid,
  2198. array->uuid);
  2199. is_module_guid = false;
  2200. } else {
  2201. ret = skl_tplg_get_token(dev, array->value, skl,
  2202. mconfig);
  2203. }
  2204. if (ret < 0)
  2205. return ret;
  2206. tuple_size += sizeof(*array->uuid);
  2207. continue;
  2208. default:
  2209. tkn_elem = array->value;
  2210. tkn_count = 0;
  2211. break;
  2212. }
  2213. while (tkn_count <= (array->num_elems - 1)) {
  2214. ret = skl_tplg_get_token(dev, tkn_elem,
  2215. skl, mconfig);
  2216. if (ret < 0)
  2217. return ret;
  2218. tkn_count = tkn_count + ret;
  2219. tkn_elem++;
  2220. }
  2221. tuple_size += tkn_count * sizeof(*tkn_elem);
  2222. }
  2223. return off;
  2224. }
/*
 * Every data block is preceded by a descriptor giving the number
 * of data blocks, the type of the block and its size.
 */
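/*
 * Rough layout of the tuple-based private data as consumed further below
 * (sketch):
 *
 *	[NUM_BLOCKS descriptor]
 *	[BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][block payload]
 *	... repeated NUM_BLOCKS times ...
 *
 * where each payload is either a token tuple array or a caps binary blob.
 */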
  2229. static int skl_tplg_get_desc_blocks(struct device *dev,
  2230. struct snd_soc_tplg_vendor_array *array)
  2231. {
  2232. struct snd_soc_tplg_vendor_value_elem *tkn_elem;
  2233. tkn_elem = array->value;
  2234. switch (tkn_elem->token) {
  2235. case SKL_TKN_U8_NUM_BLOCKS:
  2236. case SKL_TKN_U8_BLOCK_TYPE:
  2237. case SKL_TKN_U16_BLOCK_SIZE:
  2238. return tkn_elem->value;
  2239. default:
  2240. dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
  2241. break;
  2242. }
  2243. return -EINVAL;
  2244. }
  2245. /* Functions to parse private data from configuration file format v4 */
/*
 * Add a pipeline from the topology binary into the driver pipeline list.
 *
 * If it was already added, return that instance;
 * otherwise create a new instance and add it to the driver list.
 */
  2252. static int skl_tplg_add_pipe_v4(struct device *dev,
  2253. struct skl_module_cfg *mconfig, struct skl_dev *skl,
  2254. struct skl_dfw_v4_pipe *dfw_pipe)
  2255. {
  2256. struct skl_pipeline *ppl;
  2257. struct skl_pipe *pipe;
  2258. struct skl_pipe_params *params;
  2259. list_for_each_entry(ppl, &skl->ppl_list, node) {
  2260. if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
  2261. mconfig->pipe = ppl->pipe;
  2262. return 0;
  2263. }
  2264. }
  2265. ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
  2266. if (!ppl)
  2267. return -ENOMEM;
  2268. pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
  2269. if (!pipe)
  2270. return -ENOMEM;
  2271. params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
  2272. if (!params)
  2273. return -ENOMEM;
  2274. pipe->ppl_id = dfw_pipe->pipe_id;
  2275. pipe->memory_pages = dfw_pipe->memory_pages;
  2276. pipe->pipe_priority = dfw_pipe->pipe_priority;
  2277. pipe->conn_type = dfw_pipe->conn_type;
  2278. pipe->state = SKL_PIPE_INVALID;
  2279. pipe->p_params = params;
  2280. INIT_LIST_HEAD(&pipe->w_list);
  2281. ppl->pipe = pipe;
  2282. list_add(&ppl->node, &skl->ppl_list);
  2283. mconfig->pipe = pipe;
  2284. return 0;
  2285. }
  2286. static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
  2287. struct skl_module_pin *m_pin,
  2288. bool is_dynamic, int max_pin)
  2289. {
  2290. int i;
  2291. for (i = 0; i < max_pin; i++) {
  2292. m_pin[i].id.module_id = dfw_pin[i].module_id;
  2293. m_pin[i].id.instance_id = dfw_pin[i].instance_id;
  2294. m_pin[i].in_use = false;
  2295. m_pin[i].is_dynamic = is_dynamic;
  2296. m_pin[i].pin_state = SKL_PIN_UNBIND;
  2297. }
  2298. }
  2299. static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
  2300. struct skl_dfw_v4_module_fmt *src_fmt,
  2301. int pins)
  2302. {
  2303. int i;
  2304. for (i = 0; i < pins; i++) {
  2305. dst_fmt[i].fmt.channels = src_fmt[i].channels;
  2306. dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
  2307. dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
  2308. dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
  2309. dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
  2310. dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
  2311. dst_fmt[i].fmt.interleaving_style =
  2312. src_fmt[i].interleaving_style;
  2313. dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
  2314. }
  2315. }
  2316. static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
  2317. struct skl_dev *skl, struct device *dev,
  2318. struct skl_module_cfg *mconfig)
  2319. {
  2320. struct skl_dfw_v4_module *dfw =
  2321. (struct skl_dfw_v4_module *)tplg_w->priv.data;
  2322. int ret;
  2323. int idx = mconfig->fmt_cfg_idx;
  2324. dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");
  2325. ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
  2326. if (ret)
  2327. return ret;
  2328. mconfig->id.module_id = -1;
  2329. mconfig->id.instance_id = dfw->instance_id;
  2330. mconfig->module->resources[0].cpc = dfw->max_mcps / 1000;
  2331. mconfig->module->resources[0].ibs = dfw->ibs;
  2332. mconfig->module->resources[0].obs = dfw->obs;
  2333. mconfig->core_id = dfw->core_id;
  2334. mconfig->module->max_input_pins = dfw->max_in_queue;
  2335. mconfig->module->max_output_pins = dfw->max_out_queue;
  2336. mconfig->module->loadable = dfw->is_loadable;
  2337. skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
  2338. MAX_IN_QUEUE);
  2339. skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
  2340. MAX_OUT_QUEUE);
  2341. mconfig->params_fixup = dfw->params_fixup;
  2342. mconfig->converter = dfw->converter;
  2343. mconfig->m_type = dfw->module_type;
  2344. mconfig->vbus_id = dfw->vbus_id;
  2345. mconfig->module->resources[0].is_pages = dfw->mem_pages;
  2346. ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
  2347. if (ret)
  2348. return ret;
  2349. mconfig->dev_type = dfw->dev_type;
  2350. mconfig->hw_conn_type = dfw->hw_conn_type;
  2351. mconfig->time_slot = dfw->time_slot;
  2352. mconfig->formats_config[idx].caps_size = dfw->caps.caps_size;
  2353. mconfig->m_in_pin = devm_kcalloc(dev,
  2354. MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
  2355. GFP_KERNEL);
  2356. if (!mconfig->m_in_pin)
  2357. return -ENOMEM;
  2358. mconfig->m_out_pin = devm_kcalloc(dev,
  2359. MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
  2360. GFP_KERNEL);
  2361. if (!mconfig->m_out_pin)
  2362. return -ENOMEM;
  2363. skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
  2364. dfw->is_dynamic_in_pin,
  2365. mconfig->module->max_input_pins);
  2366. skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
  2367. dfw->is_dynamic_out_pin,
  2368. mconfig->module->max_output_pins);
  2369. if (mconfig->formats_config[idx].caps_size) {
  2370. mconfig->formats_config[idx].set_params = dfw->caps.set_params;
  2371. mconfig->formats_config[idx].param_id = dfw->caps.param_id;
  2372. mconfig->formats_config[idx].caps =
  2373. devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
  2374. GFP_KERNEL);
  2375. if (!mconfig->formats_config[idx].caps)
  2376. return -ENOMEM;
  2377. memcpy(mconfig->formats_config[idx].caps, dfw->caps.caps,
  2378. dfw->caps.caps_size);
  2379. }
  2380. return 0;
  2381. }
  2382. static int skl_tplg_get_caps_data(struct device *dev, char *data,
  2383. struct skl_module_cfg *mconfig)
  2384. {
  2385. int idx = mconfig->fmt_cfg_idx;
  2386. if (mconfig->formats_config[idx].caps_size > 0) {
  2387. mconfig->formats_config[idx].caps =
  2388. devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
  2389. GFP_KERNEL);
  2390. if (!mconfig->formats_config[idx].caps)
  2391. return -ENOMEM;
  2392. memcpy(mconfig->formats_config[idx].caps, data,
  2393. mconfig->formats_config[idx].caps_size);
  2394. }
  2395. return mconfig->formats_config[idx].caps_size;
  2396. }
/*
 * Parse the private data for the token and corresponding value.
 * The private data can have multiple data blocks. Each data block
 * is preceded by a descriptor for the number of blocks and a descriptor
 * for the type and size of the succeeding data block.
 */
  2403. static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
  2404. struct skl_dev *skl, struct device *dev,
  2405. struct skl_module_cfg *mconfig)
  2406. {
  2407. struct snd_soc_tplg_vendor_array *array;
  2408. int num_blocks, block_size, block_type, off = 0;
  2409. char *data;
  2410. int ret;
  2411. /*
  2412. * v4 configuration files have a valid UUID at the start of
  2413. * the widget's private data.
  2414. */
  2415. if (uuid_is_valid((char *)tplg_w->priv.data))
  2416. return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);
  2417. /* Read the NUM_DATA_BLOCKS descriptor */
  2418. array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
  2419. ret = skl_tplg_get_desc_blocks(dev, array);
  2420. if (ret < 0)
  2421. return ret;
  2422. num_blocks = ret;
  2423. off += array->size;
  2424. /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
  2425. while (num_blocks > 0) {
  2426. array = (struct snd_soc_tplg_vendor_array *)
  2427. (tplg_w->priv.data + off);
  2428. ret = skl_tplg_get_desc_blocks(dev, array);
  2429. if (ret < 0)
  2430. return ret;
  2431. block_type = ret;
  2432. off += array->size;
  2433. array = (struct snd_soc_tplg_vendor_array *)
  2434. (tplg_w->priv.data + off);
  2435. ret = skl_tplg_get_desc_blocks(dev, array);
  2436. if (ret < 0)
  2437. return ret;
  2438. block_size = ret;
  2439. off += array->size;
  2440. data = (tplg_w->priv.data + off);
  2441. if (block_type == SKL_TYPE_TUPLE) {
  2442. ret = skl_tplg_get_tokens(dev, data,
  2443. skl, mconfig, block_size);
  2444. } else {
  2445. ret = skl_tplg_get_caps_data(dev, data, mconfig);
  2446. }
  2447. if (ret < 0)
  2448. return ret;
  2449. --num_blocks;
  2450. off += ret;
  2451. }
  2452. return 0;
  2453. }
  2454. static void skl_clear_pin_config(struct snd_soc_component *component,
  2455. struct snd_soc_dapm_widget *w)
  2456. {
  2457. int i;
  2458. struct skl_module_cfg *mconfig;
  2459. struct skl_pipe *pipe;
  2460. if (!strncmp(w->dapm->component->name, component->name,
  2461. strlen(component->name))) {
  2462. mconfig = w->priv;
  2463. pipe = mconfig->pipe;
  2464. for (i = 0; i < mconfig->module->max_input_pins; i++) {
  2465. mconfig->m_in_pin[i].in_use = false;
  2466. mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
  2467. }
  2468. for (i = 0; i < mconfig->module->max_output_pins; i++) {
  2469. mconfig->m_out_pin[i].in_use = false;
  2470. mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
  2471. }
  2472. pipe->state = SKL_PIPE_INVALID;
  2473. mconfig->m_state = SKL_MODULE_UNINIT;
  2474. }
  2475. }
  2476. void skl_cleanup_resources(struct skl_dev *skl)
  2477. {
  2478. struct snd_soc_component *soc_component = skl->component;
  2479. struct snd_soc_dapm_widget *w;
  2480. struct snd_soc_card *card;
  2481. if (soc_component == NULL)
  2482. return;
  2483. card = soc_component->card;
  2484. if (!card || !card->instantiated)
  2485. return;
  2486. list_for_each_entry(w, &card->widgets, list) {
  2487. if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL)
  2488. skl_clear_pin_config(soc_component, w);
  2489. }
  2490. skl_clear_module_cnt(skl->dsp);
  2491. }
/*
 * Topology core widget load callback
 *
 * This is used to save the private data for each widget, which gives the
 * driver information about the module and pipeline parameters the DSP FW
 * expects, such as ids, resource values, formats etc.
 */
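/*
 * Widgets without private data (for example plain routing widgets) skip
 * straight to the event binding below.
 */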
  2499. static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
  2500. struct snd_soc_dapm_widget *w,
  2501. struct snd_soc_tplg_dapm_widget *tplg_w)
  2502. {
  2503. int ret;
  2504. struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
  2505. struct skl_dev *skl = bus_to_skl(bus);
  2506. struct skl_module_cfg *mconfig;
  2507. if (!tplg_w->priv.size)
  2508. goto bind_event;
  2509. mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
  2510. if (!mconfig)
  2511. return -ENOMEM;
  2512. if (skl->nr_modules == 0) {
  2513. mconfig->module = devm_kzalloc(bus->dev,
  2514. sizeof(*mconfig->module), GFP_KERNEL);
  2515. if (!mconfig->module)
  2516. return -ENOMEM;
  2517. }
  2518. w->priv = mconfig;
	/*
	 * The module binary can be loaded later, so set the id to be
	 * queried when the module is loaded for a use case.
	 */
  2523. mconfig->id.module_id = -1;
  2524. /* To provide backward compatibility, set default as SKL_PARAM_INIT */
  2525. mconfig->fmt_cfg_idx = SKL_PARAM_INIT;
  2526. /* Parse private data for tuples */
  2527. ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
  2528. if (ret < 0)
  2529. return ret;
  2530. skl_debug_init_module(skl->debugfs, w, mconfig);
  2531. bind_event:
  2532. if (tplg_w->event_type == 0) {
  2533. dev_dbg(bus->dev, "ASoC: No event handler required\n");
  2534. return 0;
  2535. }
  2536. ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
  2537. ARRAY_SIZE(skl_tplg_widget_ops),
  2538. tplg_w->event_type);
  2539. if (ret) {
  2540. dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
  2541. __func__, tplg_w->event_type);
  2542. return -EINVAL;
  2543. }
  2544. return 0;
  2545. }
  2546. static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
  2547. struct snd_soc_tplg_bytes_control *bc)
  2548. {
  2549. struct skl_algo_data *ac;
  2550. struct skl_dfw_algo_data *dfw_ac =
  2551. (struct skl_dfw_algo_data *)bc->priv.data;
  2552. ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
  2553. if (!ac)
  2554. return -ENOMEM;
  2555. /* Fill private data */
  2556. ac->max = dfw_ac->max;
  2557. ac->param_id = dfw_ac->param_id;
  2558. ac->set_params = dfw_ac->set_params;
  2559. ac->size = dfw_ac->max;
  2560. if (ac->max) {
  2561. ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
  2562. if (!ac->params)
  2563. return -ENOMEM;
  2564. memcpy(ac->params, dfw_ac->params, ac->max);
  2565. }
  2566. be->dobj.private = ac;
  2567. return 0;
  2568. }
  2569. static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
  2570. struct snd_soc_tplg_enum_control *ec)
  2571. {
  2572. void *data;
	if (ec->priv.size) {
		/* copy the whole private blob, so size the allocation to it */
		data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		memcpy(data, ec->priv.data, ec->priv.size);
		se->dobj.private = data;
	}
  2580. return 0;
  2581. }
  2582. static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
  2583. int index,
  2584. struct snd_kcontrol_new *kctl,
  2585. struct snd_soc_tplg_ctl_hdr *hdr)
  2586. {
  2587. struct soc_bytes_ext *sb;
  2588. struct snd_soc_tplg_bytes_control *tplg_bc;
  2589. struct snd_soc_tplg_enum_control *tplg_ec;
  2590. struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
  2591. struct soc_enum *se;
  2592. switch (hdr->ops.info) {
  2593. case SND_SOC_TPLG_CTL_BYTES:
  2594. tplg_bc = container_of(hdr,
  2595. struct snd_soc_tplg_bytes_control, hdr);
  2596. if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
  2597. sb = (struct soc_bytes_ext *)kctl->private_value;
  2598. if (tplg_bc->priv.size)
  2599. return skl_init_algo_data(
  2600. bus->dev, sb, tplg_bc);
  2601. }
  2602. break;
  2603. case SND_SOC_TPLG_CTL_ENUM:
  2604. tplg_ec = container_of(hdr,
  2605. struct snd_soc_tplg_enum_control, hdr);
  2606. if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) {
  2607. se = (struct soc_enum *)kctl->private_value;
  2608. if (tplg_ec->priv.size)
  2609. skl_init_enum_data(bus->dev, se, tplg_ec);
  2610. }
  2611. /*
  2612. * now that the control initializations are done, remove
  2613. * write permission for the DMIC configuration enums to
  2614. * avoid conflicts between NHLT settings and user interaction
  2615. */
  2616. if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC)
  2617. kctl->access = SNDRV_CTL_ELEM_ACCESS_READ;
  2618. break;
  2619. default:
  2620. dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
  2621. hdr->ops.get, hdr->ops.put, hdr->ops.info);
  2622. break;
  2623. }
  2624. return 0;
  2625. }
  2626. static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
  2627. struct snd_soc_tplg_vendor_string_elem *str_elem,
  2628. struct skl_dev *skl)
  2629. {
  2630. int tkn_count = 0;
  2631. static int ref_count;
  2632. switch (str_elem->token) {
  2633. case SKL_TKN_STR_LIB_NAME:
  2634. if (ref_count > skl->lib_count - 1) {
  2635. ref_count = 0;
  2636. return -EINVAL;
  2637. }
  2638. strncpy(skl->lib_info[ref_count].name,
  2639. str_elem->string,
  2640. ARRAY_SIZE(skl->lib_info[ref_count].name));
  2641. ref_count++;
  2642. break;
  2643. default:
  2644. dev_err(dev, "Not a string token %d\n", str_elem->token);
  2645. break;
  2646. }
  2647. tkn_count++;
  2648. return tkn_count;
  2649. }
  2650. static int skl_tplg_get_str_tkn(struct device *dev,
  2651. struct snd_soc_tplg_vendor_array *array,
  2652. struct skl_dev *skl)
  2653. {
  2654. int tkn_count = 0, ret;
  2655. struct snd_soc_tplg_vendor_string_elem *str_elem;
  2656. str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
  2657. while (tkn_count < array->num_elems) {
  2658. ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
  2659. str_elem++;
  2660. if (ret < 0)
  2661. return ret;
  2662. tkn_count = tkn_count + ret;
  2663. }
  2664. return tkn_count;
  2665. }
  2666. static int skl_tplg_manifest_fill_fmt(struct device *dev,
  2667. struct skl_module_iface *fmt,
  2668. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  2669. u32 dir, int fmt_idx)
  2670. {
  2671. struct skl_module_pin_fmt *dst_fmt;
  2672. struct skl_module_fmt *mod_fmt;
  2673. int ret;
  2674. if (!fmt)
  2675. return -EINVAL;
  2676. switch (dir) {
  2677. case SKL_DIR_IN:
  2678. dst_fmt = &fmt->inputs[fmt_idx];
  2679. break;
  2680. case SKL_DIR_OUT:
  2681. dst_fmt = &fmt->outputs[fmt_idx];
  2682. break;
  2683. default:
  2684. dev_err(dev, "Invalid direction: %d\n", dir);
  2685. return -EINVAL;
  2686. }
  2687. mod_fmt = &dst_fmt->fmt;
  2688. switch (tkn_elem->token) {
  2689. case SKL_TKN_MM_U32_INTF_PIN_ID:
  2690. dst_fmt->id = tkn_elem->value;
  2691. break;
  2692. default:
  2693. ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
  2694. tkn_elem->value);
  2695. if (ret < 0)
  2696. return ret;
  2697. break;
  2698. }
  2699. return 0;
  2700. }
  2701. static int skl_tplg_fill_mod_info(struct device *dev,
  2702. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  2703. struct skl_module *mod)
  2704. {
  2705. if (!mod)
  2706. return -EINVAL;
  2707. switch (tkn_elem->token) {
  2708. case SKL_TKN_U8_IN_PIN_TYPE:
  2709. mod->input_pin_type = tkn_elem->value;
  2710. break;
  2711. case SKL_TKN_U8_OUT_PIN_TYPE:
  2712. mod->output_pin_type = tkn_elem->value;
  2713. break;
  2714. case SKL_TKN_U8_IN_QUEUE_COUNT:
  2715. mod->max_input_pins = tkn_elem->value;
  2716. break;
  2717. case SKL_TKN_U8_OUT_QUEUE_COUNT:
  2718. mod->max_output_pins = tkn_elem->value;
  2719. break;
  2720. case SKL_TKN_MM_U8_NUM_RES:
  2721. mod->nr_resources = tkn_elem->value;
  2722. break;
  2723. case SKL_TKN_MM_U8_NUM_INTF:
  2724. mod->nr_interfaces = tkn_elem->value;
  2725. break;
  2726. default:
  2727. dev_err(dev, "Invalid mod info token %d", tkn_elem->token);
  2728. return -EINVAL;
  2729. }
  2730. return 0;
  2731. }
static int skl_tplg_get_int_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_dev *skl)
{
	int tkn_count = 0, ret;
	static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
	struct skl_module_res *res = NULL;
	struct skl_module_iface *fmt = NULL;
	struct skl_module *mod = NULL;
	static struct skl_astate_param *astate_table;
	static int astate_cfg_idx, count;
	int i;
	size_t size;

	if (skl->modules) {
		mod = skl->modules[mod_idx];
		res = &mod->resources[res_val_idx];
		fmt = &mod->formats[intf_val_idx];
	}

	switch (tkn_elem->token) {
	case SKL_TKN_U32_LIB_COUNT:
		skl->lib_count = tkn_elem->value;
		break;

	case SKL_TKN_U8_NUM_MOD:
		skl->nr_modules = tkn_elem->value;
		skl->modules = devm_kcalloc(dev, skl->nr_modules,
				sizeof(*skl->modules), GFP_KERNEL);
		if (!skl->modules)
			return -ENOMEM;

		for (i = 0; i < skl->nr_modules; i++) {
			skl->modules[i] = devm_kzalloc(dev,
					sizeof(struct skl_module), GFP_KERNEL);
			if (!skl->modules[i])
				return -ENOMEM;
		}
		break;

	case SKL_TKN_MM_U8_MOD_IDX:
		mod_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_ASTATE_COUNT:
		if (astate_table != NULL) {
			dev_err(dev, "More than one entry for A-State count");
			return -EINVAL;
		}

		if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
			dev_err(dev, "Invalid A-State count %d\n",
				tkn_elem->value);
			return -EINVAL;
		}

		size = struct_size(skl->cfg.astate_cfg, astate_table,
				   tkn_elem->value);
		skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
		if (!skl->cfg.astate_cfg)
			return -ENOMEM;

		astate_table = skl->cfg.astate_cfg->astate_table;
		count = skl->cfg.astate_cfg->count = tkn_elem->value;
		break;

	case SKL_TKN_U32_ASTATE_IDX:
		if (tkn_elem->value >= count) {
			dev_err(dev, "Invalid A-State index %d\n",
				tkn_elem->value);
			return -EINVAL;
		}

		astate_cfg_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_ASTATE_KCPS:
		astate_table[astate_cfg_idx].kcps = tkn_elem->value;
		break;

	case SKL_TKN_U32_ASTATE_CLK_SRC:
		astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
		break;

	case SKL_TKN_U8_IN_PIN_TYPE:
	case SKL_TKN_U8_OUT_PIN_TYPE:
	case SKL_TKN_U8_IN_QUEUE_COUNT:
	case SKL_TKN_U8_OUT_QUEUE_COUNT:
	case SKL_TKN_MM_U8_NUM_RES:
	case SKL_TKN_MM_U8_NUM_INTF:
		ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
		if (ret < 0)
			return ret;
		break;

	case SKL_TKN_U32_DIR_PIN_COUNT:
		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
		pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
		break;

	case SKL_TKN_MM_U32_RES_ID:
		if (!res)
			return -EINVAL;

		res->id = tkn_elem->value;
		res_val_idx = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_FMT_ID:
		if (!fmt)
			return -EINVAL;

		fmt->fmt_idx = tkn_elem->value;
		intf_val_idx = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_CPS:
	case SKL_TKN_MM_U32_DMA_SIZE:
	case SKL_TKN_MM_U32_CPC:
	case SKL_TKN_U32_MEM_PAGES:
	case SKL_TKN_U32_OBS:
	case SKL_TKN_U32_IBS:
	case SKL_TKN_MM_U32_RES_PIN_ID:
	case SKL_TKN_MM_U32_PIN_BUF:
		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
		if (ret < 0)
			return ret;
		break;

	case SKL_TKN_MM_U32_NUM_IN_FMT:
		if (!fmt)
			return -EINVAL;

		res->nr_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_NUM_OUT_FMT:
		if (!fmt)
			return -EINVAL;

		res->nr_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_U32_FMT_CH:
	case SKL_TKN_U32_FMT_FREQ:
	case SKL_TKN_U32_FMT_BIT_DEPTH:
	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
	case SKL_TKN_U32_FMT_CH_CONFIG:
	case SKL_TKN_U32_FMT_INTERLEAVE:
	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
	case SKL_TKN_U32_FMT_CH_MAP:
	case SKL_TKN_MM_U32_INTF_PIN_ID:
		ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
						 dir, pin_idx);
		if (ret < 0)
			return ret;
		break;

	default:
		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
		return -EINVAL;
	}

	tkn_count++;

	return tkn_count;
}

/*
 * Fill the manifest structure by parsing the tokens based on the
 * type.
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
		char *pvt_data, struct skl_dev *skl,
		int block_size)
{
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	u8 uuid_index = 0;
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		off += array->size;

		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = skl_tplg_get_str_tkn(dev, array, skl);
			if (ret < 0)
				return ret;
			tkn_count = ret;

			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_string_elem);
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			if (array->uuid->token != SKL_TKN_UUID) {
				dev_err(dev, "Not an UUID token: %d\n",
					array->uuid->token);
				return -EINVAL;
			}

			if (uuid_index >= skl->nr_modules) {
				dev_err(dev, "Too many UUID tokens\n");
				return -EINVAL;
			}

			import_guid(&skl->modules[uuid_index++]->uuid,
				    array->uuid->uuid);

			tuple_size += sizeof(*array->uuid);
			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		while (tkn_count <= array->num_elems - 1) {
			ret = skl_tplg_get_int_tkn(dev, tkn_elem, skl);
			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
		}
		tuple_size += (tkn_count * sizeof(*tkn_elem));
		tkn_count = 0;
	}

	return off;
}

/*
 * Parse manifest private data for tokens. The private data block is
 * preceded by descriptors for type and size of data block.
 */
static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
			struct device *dev, struct skl_dev *skl)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;
	off += array->size;

	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(manifest->priv.data + off);
		ret = skl_tplg_get_desc_blocks(dev, array);
		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
				(manifest->priv.data + off);
		ret = skl_tplg_get_desc_blocks(dev, array);
		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		data = (manifest->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
					block_size);
			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			return -EINVAL;
		}
		off += ret;
	}

	return 0;
}
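
/*
 * Topology manifest callback: parse the manifest private data and check
 * that the requested library count does not exceed SKL_MAX_LIB.
 */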
static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
				struct snd_soc_tplg_manifest *manifest)
{
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct skl_dev *skl = bus_to_skl(bus);

	/* proceed only if we have private data defined */
	if (manifest->priv.size == 0)
		return 0;

	skl_tplg_get_manifest_data(manifest, bus->dev, skl);

	if (skl->lib_count > SKL_MAX_LIB) {
		dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
			skl->lib_count);
		return -EINVAL;
	}

	return 0;
}
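
/*
 * Topology complete callback: preselect the DMIC multi-config enum entry
 * whose text matches the machine's DMIC channel count ("c<num>").
 */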
static int skl_tplg_complete(struct snd_soc_component *component)
{
	struct snd_soc_dobj *dobj;
	struct snd_soc_acpi_mach *mach;
	struct snd_ctl_elem_value *val;
	int i;

	val = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	mach = dev_get_platdata(component->card->dev);
	list_for_each_entry(dobj, &component->dobj_list, list) {
		struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
		struct soc_enum *se;
		char **texts;
		char chan_text[4];

		if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
		    kcontrol->put != skl_tplg_multi_config_set_dmic)
			continue;

		se = (struct soc_enum *)kcontrol->private_value;
		texts = dobj->control.dtexts;
		sprintf(chan_text, "c%d", mach->mach_params.dmic_num);

		for (i = 0; i < se->items; i++) {
			if (strstr(texts[i], chan_text)) {
				memset(val, 0, sizeof(*val));
				val->value.enumerated.item[0] = i;
				kcontrol->put(kcontrol, val);
			}
		}
	}

	kfree(val);
	return 0;
}
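
/* Callbacks registered with the ASoC topology core */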
static struct snd_soc_tplg_ops skl_tplg_ops = {
	.widget_load = skl_tplg_widget_load,
	.control_load = skl_tplg_control_load,
	.bytes_ext_ops = skl_tlv_ops,
	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
	.io_ops = skl_tplg_kcontrol_ops,
	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
	.manifest = skl_manifest_load,
	.dai_load = skl_dai_load,
	.complete = skl_tplg_complete,
};

/*
 * A pipe can have multiple modules, each of them will be a DAPM widget as
 * well. While managing a pipeline we need the list of all the widgets in
 * it, so this helper - skl_tplg_create_pipe_widget_list() - collects the
 * SKL-type widgets belonging to each pipeline.
 */
static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mcfg = NULL;
	struct skl_pipe_module *p_module = NULL;
	struct skl_pipe *pipe;

	list_for_each_entry(w, &component->card->widgets, list) {
		if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
			mcfg = w->priv;
			pipe = mcfg->pipe;

			p_module = devm_kzalloc(component->dev,
						sizeof(*p_module), GFP_KERNEL);
			if (!p_module)
				return -ENOMEM;

			p_module->w = w;
			list_add_tail(&p_module->node, &pipe->w_list);
		}
	}

	return 0;
}
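
/*
 * Mark a pipe as passthru when it contains both a host (HDA host) module
 * and a link module.
 */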
static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	bool host_found = false, link_found = false;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		w = w_module->w;
		mconfig = w->priv;

		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			host_found = true;
		else if (mconfig->dev_type != SKL_DEVICE_NONE)
			link_found = true;
	}

	if (host_found && link_found)
		pipe->passthru = true;
	else
		pipe->passthru = false;
}

/*
 * SKL topology init routine: request the topology firmware (with
 * fallbacks), load it through the ASoC topology core and build the
 * per-pipe widget lists.
 */
int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
{
	int ret;
	const struct firmware *fw;
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl;

	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
	if (ret < 0) {
		char alt_tplg_name[64];

		snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin",
			 skl->mach->drv_name);
		dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s\n",
			 skl->tplg_name, ret, alt_tplg_name);

		ret = request_firmware(&fw, alt_tplg_name, bus->dev);
		if (!ret)
			goto component_load;

		dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin\n",
			 alt_tplg_name, ret);
		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
		if (ret < 0) {
			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
				"dfw_sst.bin", ret);
			return ret;
		}
	}

component_load:
	ret = snd_soc_tplg_component_load(component, &skl_tplg_ops, fw);
	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed %d\n", ret);
		goto err;
	}

	ret = skl_tplg_create_pipe_widget_list(component);
	if (ret < 0) {
		dev_err(bus->dev, "tplg create pipe widget list failed %d\n",
			ret);
		goto err;
	}

	list_for_each_entry(ppl, &skl->ppl_list, node)
		skl_tplg_set_pipe_type(skl, ppl->pipe);

err:
	release_firmware(fw);
	return ret;
}
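
/*
 * SKL topology exit routine: drop the pipeline list entries and remove
 * the loaded topology from the component.
 */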
void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus)
{
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl, *tmp;

	list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node)
		list_del(&ppl->node);

	/* clean up topology */
	snd_soc_tplg_component_remove(component);
}