skl-topology.c 90 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532353335343535353635373538353935403541354235433544354535463547354835493550355135523553355435553556355735583559356035613562356335643565356635673568356935703571357235733574357535763577357835793580358135823583358435853586358735883589359035913592359335943595359635973598359936003601360236033604360536063607360836093610361136123613361436153616361736183619362036213622362336243625362636273628362936303631363236333634363536363637363836393640364136423643364436453646364736483649365036513652365336543655365636573658365936603661366236633664366536663667366836693670367136723673367436753676367736783679368036813682368336843685368636873688368936903691369236933694369536963697369836993700370137023703370437053706370737083709371037113712371337143715371637173718371937203721372237233724372537263727372837293730373137323733373437353736373737383739374037413742374337443745374637473748374937503751375237533754375537563757375837593760
  1. /*
  2. * skl-topology.c - Implements Platform component ALSA controls/widget
  3. * handlers.
  4. *
  5. * Copyright (C) 2014-2015 Intel Corp
  6. * Author: Jeeja KP <jeeja.kp@intel.com>
  7. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. */
  18. #include <linux/slab.h>
  19. #include <linux/types.h>
  20. #include <linux/firmware.h>
  21. #include <linux/uuid.h>
  22. #include <sound/soc.h>
  23. #include <sound/soc-topology.h>
  24. #include <uapi/sound/snd_sst_tokens.h>
  25. #include <uapi/sound/skl-tplg-interface.h>
  26. #include "skl-sst-dsp.h"
  27. #include "skl-sst-ipc.h"
  28. #include "skl-topology.h"
  29. #include "skl.h"
  30. #include "../common/sst-dsp.h"
  31. #include "../common/sst-dsp-priv.h"
/* Bits in a module's params_fixup/converter masks: which hw_params
 * fields (channel count, sample rate, sample format) a fixup applies to.
 */
#define SKL_CH_FIXUP_MASK (1 << 0)
#define SKL_RATE_FIXUP_MASK (1 << 1)
#define SKL_FMT_FIXUP_MASK (1 << 2)
/* Layout of the topology pin descriptor byte: bit 0 = input direction,
 * bits 7:4 = pin count.
 */
#define SKL_IN_DIR_BIT_MASK BIT(0)
#define SKL_PIN_COUNT_MASK GENMASK(7, 4)

/* Candidate DMIC channel selections for mono/stereo/3ch/4ch capture.
 * Each row lists the physical mic indexes used for that configuration.
 */
static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};

/* true when channel count, rate and bit width all match the hw_params */
#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))
  51. void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
  52. {
  53. struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
  54. switch (caps) {
  55. case SKL_D0I3_NONE:
  56. d0i3->non_d0i3++;
  57. break;
  58. case SKL_D0I3_STREAMING:
  59. d0i3->streaming++;
  60. break;
  61. case SKL_D0I3_NON_STREAMING:
  62. d0i3->non_streaming++;
  63. break;
  64. }
  65. }
  66. void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
  67. {
  68. struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
  69. switch (caps) {
  70. case SKL_D0I3_NONE:
  71. d0i3->non_d0i3--;
  72. break;
  73. case SKL_D0I3_STREAMING:
  74. d0i3->streaming--;
  75. break;
  76. case SKL_D0I3_NON_STREAMING:
  77. d0i3->non_streaming--;
  78. break;
  79. }
  80. }
  81. /*
  82. * SKL DSP driver modelling uses only few DAPM widgets so for rest we will
  83. * ignore. This helpers checks if the SKL driver handles this widget type
  84. */
  85. static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
  86. struct device *dev)
  87. {
  88. if (w->dapm->dev != dev)
  89. return false;
  90. switch (w->id) {
  91. case snd_soc_dapm_dai_link:
  92. case snd_soc_dapm_dai_in:
  93. case snd_soc_dapm_aif_in:
  94. case snd_soc_dapm_aif_out:
  95. case snd_soc_dapm_dai_out:
  96. case snd_soc_dapm_switch:
  97. case snd_soc_dapm_output:
  98. case snd_soc_dapm_mux:
  99. return false;
  100. default:
  101. return true;
  102. }
  103. }
  104. /*
  105. * Each pipelines needs memory to be allocated. Check if we have free memory
  106. * from available pool.
  107. */
  108. static bool skl_is_pipe_mem_avail(struct skl *skl,
  109. struct skl_module_cfg *mconfig)
  110. {
  111. struct skl_sst *ctx = skl->skl_sst;
  112. if (skl->resource.mem + mconfig->pipe->memory_pages >
  113. skl->resource.max_mem) {
  114. dev_err(ctx->dev,
  115. "%s: module_id %d instance %d\n", __func__,
  116. mconfig->id.module_id,
  117. mconfig->id.instance_id);
  118. dev_err(ctx->dev,
  119. "exceeds ppl memory available %d mem %d\n",
  120. skl->resource.max_mem, skl->resource.mem);
  121. return false;
  122. } else {
  123. return true;
  124. }
  125. }
/*
 * Add the mem to the mem pool. This is freed when pipe is deleted.
 * Note: DSP does actual memory management we only keep track for complete
 * pool
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	/* caller must have validated with skl_is_pipe_mem_avail() first */
	skl->resource.mem += mconfig->pipe->memory_pages;
}
  136. /*
  137. * Pipeline needs needs DSP CPU resources for computation, this is
  138. * quantified in MCPS (Million Clocks Per Second) required for module/pipe
  139. *
  140. * Each pipelines needs mcps to be allocated. Check if we have mcps for this
  141. * pipe.
  142. */
  143. static bool skl_is_pipe_mcps_avail(struct skl *skl,
  144. struct skl_module_cfg *mconfig)
  145. {
  146. struct skl_sst *ctx = skl->skl_sst;
  147. u8 res_idx = mconfig->res_idx;
  148. struct skl_module_res *res = &mconfig->module->resources[res_idx];
  149. if (skl->resource.mcps + res->cps > skl->resource.max_mcps) {
  150. dev_err(ctx->dev,
  151. "%s: module_id %d instance %d\n", __func__,
  152. mconfig->id.module_id, mconfig->id.instance_id);
  153. dev_err(ctx->dev,
  154. "exceeds ppl mcps available %d > mem %d\n",
  155. skl->resource.max_mcps, skl->resource.mcps);
  156. return false;
  157. } else {
  158. return true;
  159. }
  160. }
/* Charge this module's cps (per its active resource index) to the pool;
 * balanced by skl_tplg_free_pipe_mcps() on teardown.
 */
static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	skl->resource.mcps += res->cps;
}
/*
 * Free the mcps when tearing down: return this module's cps charge
 * (per its active resource index) to the pool.
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	u8 res_idx = mconfig->res_idx;
	struct skl_module_res *res = &mconfig->module->resources[res_idx];

	skl->resource.mcps -= res->cps;
}
/*
 * Free the memory when tearing down: return the pipe's memory_pages
 * to the pool, balancing skl_tplg_alloc_pipe_mem().
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}
  186. static void skl_dump_mconfig(struct skl_sst *ctx,
  187. struct skl_module_cfg *mcfg)
  188. {
  189. struct skl_module_iface *iface = &mcfg->module->formats[0];
  190. dev_dbg(ctx->dev, "Dumping config\n");
  191. dev_dbg(ctx->dev, "Input Format:\n");
  192. dev_dbg(ctx->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
  193. dev_dbg(ctx->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
  194. dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
  195. dev_dbg(ctx->dev, "valid bit depth = %d\n",
  196. iface->inputs[0].fmt.valid_bit_depth);
  197. dev_dbg(ctx->dev, "Output Format:\n");
  198. dev_dbg(ctx->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
  199. dev_dbg(ctx->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
  200. dev_dbg(ctx->dev, "valid bit depth = %d\n",
  201. iface->outputs[0].fmt.valid_bit_depth);
  202. dev_dbg(ctx->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
  203. }
  204. static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
  205. {
  206. int slot_map = 0xFFFFFFFF;
  207. int start_slot = 0;
  208. int i;
  209. for (i = 0; i < chs; i++) {
  210. /*
  211. * For 2 channels with starting slot as 0, slot map will
  212. * look like 0xFFFFFF10.
  213. */
  214. slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
  215. start_slot++;
  216. }
  217. fmt->ch_map = slot_map;
  218. }
  219. static void skl_tplg_update_params(struct skl_module_fmt *fmt,
  220. struct skl_pipe_params *params, int fixup)
  221. {
  222. if (fixup & SKL_RATE_FIXUP_MASK)
  223. fmt->s_freq = params->s_freq;
  224. if (fixup & SKL_CH_FIXUP_MASK) {
  225. fmt->channels = params->ch;
  226. skl_tplg_update_chmap(fmt, fmt->channels);
  227. }
  228. if (fixup & SKL_FMT_FIXUP_MASK) {
  229. fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
  230. /*
  231. * 16 bit is 16 bit container whereas 24 bit is in 32 bit
  232. * container so update bit depth accordingly
  233. */
  234. switch (fmt->valid_bit_depth) {
  235. case SKL_DEPTH_16BIT:
  236. fmt->bit_depth = fmt->valid_bit_depth;
  237. break;
  238. default:
  239. fmt->bit_depth = SKL_DEPTH_32BIT;
  240. break;
  241. }
  242. }
  243. }
  244. /*
  245. * A pipeline may have modules which impact the pcm parameters, like SRC,
  246. * channel converter, format converter.
  247. * We need to calculate the output params by applying the 'fixup'
  248. * Topology will tell driver which type of fixup is to be applied by
  249. * supplying the fixup mask, so based on that we calculate the output
  250. *
  251. * Now In FE the pcm hw_params is source/target format. Same is applicable
  252. * for BE with its hw_params invoked.
  253. * here based on FE, BE pipeline and direction we calculate the input and
  254. * outfix and then apply that for a module
  255. */
  256. static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
  257. struct skl_pipe_params *params, bool is_fe)
  258. {
  259. int in_fixup, out_fixup;
  260. struct skl_module_fmt *in_fmt, *out_fmt;
  261. /* Fixups will be applied to pin 0 only */
  262. in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
  263. out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;
  264. if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
  265. if (is_fe) {
  266. in_fixup = m_cfg->params_fixup;
  267. out_fixup = (~m_cfg->converter) &
  268. m_cfg->params_fixup;
  269. } else {
  270. out_fixup = m_cfg->params_fixup;
  271. in_fixup = (~m_cfg->converter) &
  272. m_cfg->params_fixup;
  273. }
  274. } else {
  275. if (is_fe) {
  276. out_fixup = m_cfg->params_fixup;
  277. in_fixup = (~m_cfg->converter) &
  278. m_cfg->params_fixup;
  279. } else {
  280. in_fixup = m_cfg->params_fixup;
  281. out_fixup = (~m_cfg->converter) &
  282. m_cfg->params_fixup;
  283. }
  284. }
  285. skl_tplg_update_params(in_fmt, params, in_fixup);
  286. skl_tplg_update_params(out_fmt, params, out_fixup);
  287. }
  288. /*
  289. * A module needs input and output buffers, which are dependent upon pcm
  290. * params, so once we have calculate params, we need buffer calculation as
  291. * well.
  292. */
  293. static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
  294. struct skl_module_cfg *mcfg)
  295. {
  296. int multiplier = 1;
  297. struct skl_module_fmt *in_fmt, *out_fmt;
  298. struct skl_module_res *res;
  299. /* Since fixups is applied to pin 0 only, ibs, obs needs
  300. * change for pin 0 only
  301. */
  302. res = &mcfg->module->resources[0];
  303. in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
  304. out_fmt = &mcfg->module->formats[0].outputs[0].fmt;
  305. if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
  306. multiplier = 5;
  307. res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
  308. in_fmt->channels * (in_fmt->bit_depth >> 3) *
  309. multiplier;
  310. res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
  311. out_fmt->channels * (out_fmt->bit_depth >> 3) *
  312. multiplier;
  313. }
  314. static u8 skl_tplg_be_dev_type(int dev_type)
  315. {
  316. int ret;
  317. switch (dev_type) {
  318. case SKL_DEVICE_BT:
  319. ret = NHLT_DEVICE_BT;
  320. break;
  321. case SKL_DEVICE_DMIC:
  322. ret = NHLT_DEVICE_DMIC;
  323. break;
  324. case SKL_DEVICE_I2S:
  325. ret = NHLT_DEVICE_I2S;
  326. break;
  327. default:
  328. ret = NHLT_DEVICE_INVALID;
  329. break;
  330. }
  331. return ret;
  332. }
/*
 * For a BE widget without an endpoint config blob from topology, look
 * up a default NHLT blob matching the module's device type, direction
 * and pin-0 format, and attach it to formats_config.
 *
 * Returns 0 on success (or when a blob already exists), -EINVAL for an
 * unsupported device type, -EIO when no matching blob is found.
 */
static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(ctx->dev);
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
	int fmt_idx = m_cfg->fmt_idx;
	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(ctx->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		/* DMIC is capture only: take the format from input pin 0 */
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_iface->inputs[0].fmt.s_freq;
		s_fmt = m_iface->inputs[0].fmt.bit_depth;
		ch = m_iface->inputs[0].fmt.channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			/* module feeds the link: playback, output pin 0 */
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_iface->outputs[0].fmt.s_freq;
			s_fmt = m_iface->outputs[0].fmt.bit_depth;
			ch = m_iface->outputs[0].fmt.channels;
		} else {
			/* module is fed by the link: capture, input pin 0 */
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_iface->inputs[0].fmt.s_freq;
			s_fmt = m_iface->inputs[0].fmt.bit_depth;
			ch = m_iface->inputs[0].fmt.channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir, dev_type);
	if (cfg) {
		/* NOTE(review): caps points into the NHLT table, not a
		 * copy — presumably it must outlive this widget; confirm.
		 */
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}
  388. static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
  389. struct skl_sst *ctx)
  390. {
  391. struct skl_module_cfg *m_cfg = w->priv;
  392. struct skl_pipe_params *params = m_cfg->pipe->p_params;
  393. int p_conn_type = m_cfg->pipe->conn_type;
  394. bool is_fe;
  395. if (!m_cfg->params_fixup)
  396. return;
  397. dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
  398. w->name);
  399. skl_dump_mconfig(ctx, m_cfg);
  400. if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
  401. is_fe = true;
  402. else
  403. is_fe = false;
  404. skl_tplg_update_params_fixup(m_cfg, params, is_fe);
  405. skl_tplg_update_buffer_size(ctx, m_cfg);
  406. dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
  407. w->name);
  408. skl_dump_mconfig(ctx, m_cfg);
  409. }
/*
 * some modules can have multiple params set from user control and
 * need to be set after module is initialized. If set_param flag is
 * set module params will be done after module is initialised.
 *
 * Returns 0 on success or the first negative error from
 * skl_set_module_params().
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	/* first send the widget's own format blob if it is flagged for
	 * post-init delivery (SKL_PARAM_SET)
	 */
	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	/* then send any TLV bytes-control payloads flagged SKL_PARAM_SET */
	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
  449. /*
  450. * some module param can set from user control and this is required as
  451. * when module is initailzed. if module param is required in init it is
  452. * identifed by set_param flag. if set_param flag is not set, then this
  453. * parameter needs to set as part of module init.
  454. */
  455. static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
  456. {
  457. const struct snd_kcontrol_new *k;
  458. struct soc_bytes_ext *sb;
  459. struct skl_algo_data *bc;
  460. struct skl_module_cfg *mconfig = w->priv;
  461. int i;
  462. for (i = 0; i < w->num_kcontrols; i++) {
  463. k = &w->kcontrol_news[i];
  464. if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
  465. sb = (struct soc_bytes_ext *)k->private_value;
  466. bc = (struct skl_algo_data *)sb->dobj.private;
  467. if (bc->set_params != SKL_PARAM_INIT)
  468. continue;
  469. mconfig->formats_config.caps = (u32 *)bc->params;
  470. mconfig->formats_config.caps_size = bc->size;
  471. break;
  472. }
  473. }
  474. return 0;
  475. }
  476. static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
  477. struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
  478. {
  479. switch (mcfg->dev_type) {
  480. case SKL_DEVICE_HDAHOST:
  481. return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);
  482. case SKL_DEVICE_HDALINK:
  483. return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
  484. }
  485. return 0;
  486. }
  487. /*
  488. * Inside a pipe instance, we can have various modules. These modules need
  489. * to instantiated in DSP by invoking INIT_MODULE IPC, which is achieved by
  490. * skl_init_module() routine, so invoke that for all modules in a pipeline
  491. */
  492. static int
  493. skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
  494. {
  495. struct skl_pipe_module *w_module;
  496. struct snd_soc_dapm_widget *w;
  497. struct skl_module_cfg *mconfig;
  498. struct skl_sst *ctx = skl->skl_sst;
  499. u8 cfg_idx;
  500. int ret = 0;
  501. list_for_each_entry(w_module, &pipe->w_list, node) {
  502. uuid_le *uuid_mod;
  503. w = w_module->w;
  504. mconfig = w->priv;
  505. /* check if module ids are populated */
  506. if (mconfig->id.module_id < 0) {
  507. dev_err(skl->skl_sst->dev,
  508. "module %pUL id not populated\n",
  509. (uuid_le *)mconfig->guid);
  510. return -EIO;
  511. }
  512. cfg_idx = mconfig->pipe->cur_config_idx;
  513. mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
  514. mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
  515. /* check resource available */
  516. if (!skl_is_pipe_mcps_avail(skl, mconfig))
  517. return -ENOMEM;
  518. if (mconfig->module->loadable && ctx->dsp->fw_ops.load_mod) {
  519. ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
  520. mconfig->id.module_id, mconfig->guid);
  521. if (ret < 0)
  522. return ret;
  523. mconfig->m_state = SKL_MODULE_LOADED;
  524. }
  525. /* prepare the DMA if the module is gateway cpr */
  526. ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
  527. if (ret < 0)
  528. return ret;
  529. /* update blob if blob is null for be with default value */
  530. skl_tplg_update_be_blob(w, ctx);
  531. /*
  532. * apply fix/conversion to module params based on
  533. * FE/BE params
  534. */
  535. skl_tplg_update_module_params(w, ctx);
  536. uuid_mod = (uuid_le *)mconfig->guid;
  537. mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
  538. mconfig->id.instance_id);
  539. if (mconfig->id.pvt_id < 0)
  540. return ret;
  541. skl_tplg_set_module_init_data(w);
  542. ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id);
  543. if (ret < 0) {
  544. dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n",
  545. mconfig->core_id, ret);
  546. return ret;
  547. }
  548. ret = skl_init_module(ctx, mconfig);
  549. if (ret < 0) {
  550. skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
  551. goto err;
  552. }
  553. skl_tplg_alloc_pipe_mcps(skl, mconfig);
  554. ret = skl_tplg_set_module_params(w, ctx);
  555. if (ret < 0)
  556. goto err;
  557. }
  558. return 0;
  559. err:
  560. skl_dsp_put_core(ctx->dsp, mconfig->core_id);
  561. return ret;
  562. }
static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
						struct skl_pipe *pipe)
{
	int ret = 0;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	/* unload loadable modules and release per-module resources */
	list_for_each_entry(w_module, &pipe->w_list, node) {
		uuid_le *uuid_mod;

		mconfig = w_module->w->priv;
		uuid_mod = (uuid_le *)mconfig->guid;

		/* only loadable modules that got past UNINIT were loaded */
		if (mconfig->module->loadable && ctx->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
						mconfig->id.module_id);
			/*
			 * NOTE(review): bailing out here skips the pvt_id
			 * release and core put for this and all remaining
			 * modules — confirm this leak on the error path is
			 * intended.
			 */
			if (ret < 0)
				return -EIO;
		}

		/* release the private instance id taken at init time */
		skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);

		/* drop the core reference taken by skl_dsp_get_core() */
		ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id);
		if (ret < 0) {
			/* don't return; continue with other modules */
			dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n",
				mconfig->core_id, ret);
		}
	}

	/* no modules to unload in this path, so return */
	return ret;
}
  591. /*
  592. * Here, we select pipe format based on the pipe type and pipe
  593. * direction to determine the current config index for the pipeline.
  594. * The config index is then used to select proper module resources.
  595. * Intermediate pipes currently have a fixed format hence we select the
  596. * 0th configuratation by default for such pipes.
  597. */
  598. static int
  599. skl_tplg_get_pipe_config(struct skl *skl, struct skl_module_cfg *mconfig)
  600. {
  601. struct skl_sst *ctx = skl->skl_sst;
  602. struct skl_pipe *pipe = mconfig->pipe;
  603. struct skl_pipe_params *params = pipe->p_params;
  604. struct skl_path_config *pconfig = &pipe->configs[0];
  605. struct skl_pipe_fmt *fmt = NULL;
  606. bool in_fmt = false;
  607. int i;
  608. if (pipe->nr_cfgs == 0) {
  609. pipe->cur_config_idx = 0;
  610. return 0;
  611. }
  612. if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
  613. dev_dbg(ctx->dev, "No conn_type detected, take 0th config\n");
  614. pipe->cur_config_idx = 0;
  615. pipe->memory_pages = pconfig->mem_pages;
  616. return 0;
  617. }
  618. if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
  619. pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
  620. (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
  621. pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
  622. in_fmt = true;
  623. for (i = 0; i < pipe->nr_cfgs; i++) {
  624. pconfig = &pipe->configs[i];
  625. if (in_fmt)
  626. fmt = &pconfig->in_fmt;
  627. else
  628. fmt = &pconfig->out_fmt;
  629. if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
  630. fmt->channels, fmt->freq, fmt->bps)) {
  631. pipe->cur_config_idx = i;
  632. pipe->memory_pages = pconfig->mem_pages;
  633. dev_dbg(ctx->dev, "Using pipe config: %d\n", i);
  634. return 0;
  635. }
  636. }
  637. dev_err(ctx->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
  638. params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
  639. return -EINVAL;
  640. }
  641. /*
  642. * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
  643. * need create the pipeline. So we do following:
  644. * - check the resources
  645. * - Create the pipeline
  646. * - Initialize the modules in pipeline
  647. * - finally bind all modules together
  648. */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_module_deferred_bind *modules;

	/* pick the pipe config matching the current hw params */
	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	/* reserve the resources whose availability was checked above */
	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		/* the first widget has no predecessor to bind to */
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			/*
			 * NOTE(review): the bind result is ignored here —
			 * confirm a deferred-bind failure is intentionally
			 * non-fatal for pipe startup.
			 */
			if (modules->dst == module)
				skl_bind_modules(ctx, modules->src,
							modules->dst);
		}
	}

	return 0;
}
  708. static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
  709. int size, struct skl_module_cfg *mcfg)
  710. {
  711. int i, pvt_id;
  712. if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
  713. struct skl_kpb_params *kpb_params =
  714. (struct skl_kpb_params *)params;
  715. struct skl_mod_inst_map *inst = kpb_params->u.map;
  716. for (i = 0; i < kpb_params->num_modules; i++) {
  717. pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
  718. inst->inst_id);
  719. if (pvt_id < 0)
  720. return -EINVAL;
  721. inst->inst_id = pvt_id;
  722. inst++;
  723. }
  724. }
  725. return 0;
  726. }
  727. /*
  728. * Some modules require params to be set after the module is bound to
  729. * all pins connected.
  730. *
  731. * The module provider initializes set_param flag for such modules and we
  732. * send params after binding
  733. */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	/*
	 * NOTE(review): callers appear to pass w->priv as mcfg, making
	 * mconfig and mcfg the same object — confirm against call sites.
	 */
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * check all out/in pins are in bind state.
	 * if so set the module param
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	/* first send the widget's caps blob, if flagged for bind time */
	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	/* then send every TLV byte control flagged SKL_PARAM_BIND */
	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				/*
				 * work on a copy so the instance-id fixup
				 * does not modify the control's own payload
				 */
				params = kzalloc(bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				memcpy(params, bc->params, bc->max);
				/* rewrite KPB map entries to private ids */
				skl_fill_sink_instance_id(ctx, params, bc->max,
								mconfig);

				ret = skl_set_module_params(ctx, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
  787. static int skl_get_module_id(struct skl_sst *ctx, uuid_le *uuid)
  788. {
  789. struct uuid_module *module;
  790. list_for_each_entry(module, &ctx->uuid_list, list) {
  791. if (uuid_le_cmp(*uuid, module->uuid) == 0)
  792. return module->id;
  793. }
  794. return -EINVAL;
  795. }
/*
 * Rewrite the UUID-keyed bind-param table attached to a TLV control into
 * module-id form, swapping the control's payload for the translated copy.
 */
static int skl_tplg_find_moduleid_from_uuid(struct skl *skl,
					const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = skl_to_bus(skl);
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		/* id-based map entries replace the uuid-based ones 1:1 */
		size = uuid_params->num_modules *
			sizeof(struct skl_mod_inst_map) +
			sizeof(uuid_params->num_modules);

		/* device-managed: freed automatically with bus->dev */
		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl->skl_sst,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		/*
		 * free the old payload and point the control at the
		 * translated table; bc->max tracks the new size
		 */
		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}
  830. /*
  831. * Retrieve the module id from UUID mentioned in the
  832. * post bind params
  833. */
  834. void skl_tplg_add_moduleid_in_bind_params(struct skl *skl,
  835. struct snd_soc_dapm_widget *w)
  836. {
  837. struct skl_module_cfg *mconfig = w->priv;
  838. int i;
  839. /*
  840. * Post bind params are used for only for KPB
  841. * to set copier instances to drain the data
  842. * in fast mode
  843. */
  844. if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
  845. return;
  846. for (i = 0; i < w->num_kcontrols; i++)
  847. if ((w->kcontrol_news[i].access &
  848. SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
  849. (skl_tplg_find_moduleid_from_uuid(skl,
  850. &w->kcontrol_news[i]) < 0))
  851. dev_err(skl->skl_sst->dev,
  852. "%s: invalid kpb post bind params\n",
  853. __func__);
  854. }
  855. static int skl_tplg_module_add_deferred_bind(struct skl *skl,
  856. struct skl_module_cfg *src, struct skl_module_cfg *dst)
  857. {
  858. struct skl_module_deferred_bind *m_list, *modules;
  859. int i;
  860. /* only supported for module with static pin connection */
  861. for (i = 0; i < dst->module->max_input_pins; i++) {
  862. struct skl_module_pin *pin = &dst->m_in_pin[i];
  863. if (pin->is_dynamic)
  864. continue;
  865. if ((pin->id.module_id == src->id.module_id) &&
  866. (pin->id.instance_id == src->id.instance_id)) {
  867. if (!list_empty(&skl->bind_list)) {
  868. list_for_each_entry(modules, &skl->bind_list, node) {
  869. if (modules->src == src && modules->dst == dst)
  870. return 0;
  871. }
  872. }
  873. m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
  874. if (!m_list)
  875. return -ENOMEM;
  876. m_list->src = src;
  877. m_list->dst = dst;
  878. list_add(&m_list->node, &skl->bind_list);
  879. }
  880. }
  881. return 0;
  882. }
static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		/* non-DSP widget: recurse through it toward a DSP sink */
		if (!is_skl_dsp_widget_type(p->sink, ctx->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * here we will check widgets in sink pipelines, so that
		 * can be any widgets type and we are only interested if
		 * they are ones used for SKL so check that first
		 */
		if ((p->sink->priv != NULL) &&
			is_skl_dsp_widget_type(p->sink, ctx->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than PGA leaf can be connected
			 * directly or via switch to a module in another
			 * pipeline. EX: reference path
			 * when the path is enabled, the dst module that needs
			 * to be bound may not be initialized. if the module is
			 * not initialized, add these modules in the deferred
			 * bind list and when the dst module is initialised,
			 * bind this module to the dst_module in deferred list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;
			}

			/* cannot bind yet if either endpoint is uninitialized */
			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(ctx,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	/* no DSP sink at this level: keep walking from the last sink seen */
	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}
  952. /*
  953. * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
  954. * we need to do following:
  955. * - Bind to sink pipeline
  956. * Since the sink pipes can be running and we don't get mixer event on
  957. * connect for already running mixer, we need to find the sink pipes
  958. * here and bind to them. This way dynamic connect works.
  959. * - Start sink pipeline, if not running
  960. * - Then run current pipe
  961. */
  962. static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
  963. struct skl *skl)
  964. {
  965. struct skl_module_cfg *src_mconfig;
  966. struct skl_sst *ctx = skl->skl_sst;
  967. int ret = 0;
  968. src_mconfig = w->priv;
  969. /*
  970. * find which sink it is connected to, bind with the sink,
  971. * if sink is not started, start sink pipe first, then start
  972. * this pipe
  973. */
  974. ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
  975. if (ret)
  976. return ret;
  977. /* Start source pipe last after starting all sinks */
  978. if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
  979. return skl_run_pipe(ctx, src_mconfig->pipe);
  980. return 0;
  981. }
  982. static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
  983. struct snd_soc_dapm_widget *w, struct skl *skl)
  984. {
  985. struct snd_soc_dapm_path *p;
  986. struct snd_soc_dapm_widget *src_w = NULL;
  987. struct skl_sst *ctx = skl->skl_sst;
  988. snd_soc_dapm_widget_for_each_source_path(w, p) {
  989. src_w = p->source;
  990. if (!p->connect)
  991. continue;
  992. dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
  993. dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
  994. /*
  995. * here we will check widgets in sink pipelines, so that can
  996. * be any widgets type and we are only interested if they are
  997. * ones used for SKL so check that first
  998. */
  999. if ((p->source->priv != NULL) &&
  1000. is_skl_dsp_widget_type(p->source, ctx->dev)) {
  1001. return p->source;
  1002. }
  1003. }
  1004. if (src_w != NULL)
  1005. return skl_get_src_dsp_widget(src_w, skl);
  1006. return NULL;
  1007. }
  1008. /*
  1009. * in the Post-PMU event of mixer we need to do following:
  1010. * - Check if this pipe is running
  1011. * - if not, then
  1012. * - bind this pipeline to its source pipeline
  1013. * if source pipe is already running, this means it is a dynamic
  1014. * connection and we need to bind only to that pipe
  1015. * - start this pipeline
  1016. */
  1017. static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
  1018. struct skl *skl)
  1019. {
  1020. int ret = 0;
  1021. struct snd_soc_dapm_widget *source, *sink;
  1022. struct skl_module_cfg *src_mconfig, *sink_mconfig;
  1023. struct skl_sst *ctx = skl->skl_sst;
  1024. int src_pipe_started = 0;
  1025. sink = w;
  1026. sink_mconfig = sink->priv;
  1027. /*
  1028. * If source pipe is already started, that means source is driving
  1029. * one more sink before this sink got connected, Since source is
  1030. * started, bind this sink to source and start this pipe.
  1031. */
  1032. source = skl_get_src_dsp_widget(w, skl);
  1033. if (source != NULL) {
  1034. src_mconfig = source->priv;
  1035. sink_mconfig = sink->priv;
  1036. src_pipe_started = 1;
  1037. /*
  1038. * check pipe state, then no need to bind or start the
  1039. * pipe
  1040. */
  1041. if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
  1042. src_pipe_started = 0;
  1043. }
  1044. if (src_pipe_started) {
  1045. ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
  1046. if (ret)
  1047. return ret;
  1048. /* set module params after bind */
  1049. skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
  1050. skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
  1051. if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
  1052. ret = skl_run_pipe(ctx, sink_mconfig->pipe);
  1053. }
  1054. return ret;
  1055. }
  1056. /*
  1057. * in the Pre-PMD event of mixer we need to do following:
  1058. * - Stop the pipe
  1059. * - find the source connections and remove that from dapm_path_list
  1060. * - unbind with source pipelines if still connected
  1061. */
  1062. static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
  1063. struct skl *skl)
  1064. {
  1065. struct skl_module_cfg *src_mconfig, *sink_mconfig;
  1066. int ret = 0, i;
  1067. struct skl_sst *ctx = skl->skl_sst;
  1068. sink_mconfig = w->priv;
  1069. /* Stop the pipe */
  1070. ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
  1071. if (ret)
  1072. return ret;
  1073. for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
  1074. if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
  1075. src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
  1076. if (!src_mconfig)
  1077. continue;
  1078. ret = skl_unbind_modules(ctx,
  1079. src_mconfig, sink_mconfig);
  1080. }
  1081. }
  1082. return ret;
  1083. }
  1084. /*
  1085. * in the Post-PMD event of mixer we need to do following:
  1086. * - Free the mcps used
  1087. * - Free the mem used
  1088. * - Unbind the modules within the pipeline
  1089. * - Delete the pipeline (modules are not required to be explicitly
  1090. * deleted, pipeline delete is enough here
  1091. */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	/* return the resources reserved in the pre-PMU handler */
	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	/* clean up deferred binds that involve modules of this pipe */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		/* _safe variant: entries may be deleted while iterating */
		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, Unbind the
			 * modules from deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(ctx, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this entry
			 * from the deferred bind list.
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	/* unbind adjacent modules pairwise, walking source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(ctx, mconfig->pipe);

	/* pipe is gone; mark every member module uninitialized */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}
  1148. /*
  1149. * in the Post-PMD event of PGA we need to do following:
  1150. * - Free the mcps used
  1151. * - Stop the pipeline
  1152. * - In source pipe is connected, unbind with source pipelines
  1153. */
  1154. static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
  1155. struct skl *skl)
  1156. {
  1157. struct skl_module_cfg *src_mconfig, *sink_mconfig;
  1158. int ret = 0, i;
  1159. struct skl_sst *ctx = skl->skl_sst;
  1160. src_mconfig = w->priv;
  1161. /* Stop the pipe since this is a mixin module */
  1162. ret = skl_stop_pipe(ctx, src_mconfig->pipe);
  1163. if (ret)
  1164. return ret;
  1165. for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
  1166. if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
  1167. sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
  1168. if (!sink_mconfig)
  1169. continue;
  1170. /*
  1171. * This is a connecter and if path is found that means
  1172. * unbind between source and sink has not happened yet
  1173. */
  1174. ret = skl_unbind_modules(ctx, src_mconfig,
  1175. sink_mconfig);
  1176. }
  1177. }
  1178. return ret;
  1179. }
  1180. /*
  1181. * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
  1182. * second one is required that is created as another pipe entity.
  1183. * The mixer is responsible for pipe management and represent a pipeline
  1184. * instance
  1185. */
  1186. static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
  1187. struct snd_kcontrol *k, int event)
  1188. {
  1189. struct snd_soc_dapm_context *dapm = w->dapm;
  1190. struct skl *skl = get_skl_ctx(dapm->dev);
  1191. switch (event) {
  1192. case SND_SOC_DAPM_PRE_PMU:
  1193. return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
  1194. case SND_SOC_DAPM_POST_PMU:
  1195. return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
  1196. case SND_SOC_DAPM_PRE_PMD:
  1197. return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
  1198. case SND_SOC_DAPM_POST_PMD:
  1199. return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
  1200. }
  1201. return 0;
  1202. }
  1203. /*
  1204. * In modelling, we assumed rest of the modules in pipeline are PGA. But we
  1205. * are interested in last PGA (leaf PGA) in a pipeline to disconnect with
  1206. * the sink when it is running (two FE to one BE or one FE to two BE)
  1207. * scenarios
  1208. */
  1209. static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
  1210. struct snd_kcontrol *k, int event)
  1211. {
  1212. struct snd_soc_dapm_context *dapm = w->dapm;
  1213. struct skl *skl = get_skl_ctx(dapm->dev);
  1214. switch (event) {
  1215. case SND_SOC_DAPM_PRE_PMU:
  1216. return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
  1217. case SND_SOC_DAPM_POST_PMD:
  1218. return skl_tplg_pga_dapm_post_pmd_event(w, skl);
  1219. }
  1220. return 0;
  1221. }
  1222. static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
  1223. unsigned int __user *data, unsigned int size)
  1224. {
  1225. struct soc_bytes_ext *sb =
  1226. (struct soc_bytes_ext *)kcontrol->private_value;
  1227. struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
  1228. struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
  1229. struct skl_module_cfg *mconfig = w->priv;
  1230. struct skl *skl = get_skl_ctx(w->dapm->dev);
  1231. if (w->power)
  1232. skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
  1233. bc->size, bc->param_id, mconfig);
  1234. /* decrement size for TLV header */
  1235. size -= 2 * sizeof(u32);
  1236. /* check size as we don't want to send kernel data */
  1237. if (size > bc->max)
  1238. size = bc->max;
  1239. if (bc->params) {
  1240. if (copy_to_user(data, &bc->param_id, sizeof(u32)))
  1241. return -EFAULT;
  1242. if (copy_to_user(data + 1, &size, sizeof(u32)))
  1243. return -EFAULT;
  1244. if (copy_to_user(data + 2, bc->params, size))
  1245. return -EFAULT;
  1246. }
  1247. return 0;
  1248. }
  1249. #define SKL_PARAM_VENDOR_ID 0xff
static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		/* refuse payloads larger than the control's buffer */
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param id is of type Vendor, firmware expects actual
		 * parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			/* skip the two-u32 TLV header in the user buffer */
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		/* push to firmware only while the widget is powered */
		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}
  1282. static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
  1283. struct snd_ctl_elem_value *ucontrol)
  1284. {
  1285. struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
  1286. struct skl_module_cfg *mconfig = w->priv;
  1287. struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
  1288. u32 ch_type = *((u32 *)ec->dobj.private);
  1289. if (mconfig->dmic_ch_type == ch_type)
  1290. ucontrol->value.enumerated.item[0] =
  1291. mconfig->dmic_ch_combo_index;
  1292. else
  1293. ucontrol->value.enumerated.item[0] = 0;
  1294. return 0;
  1295. }
  1296. static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
  1297. struct skl_mic_sel_config *mic_cfg, struct device *dev)
  1298. {
  1299. struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;
  1300. sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
  1301. sp_cfg->set_params = SKL_PARAM_SET;
  1302. sp_cfg->param_id = 0x00;
  1303. if (!sp_cfg->caps) {
  1304. sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
  1305. if (!sp_cfg->caps)
  1306. return -ENOMEM;
  1307. }
  1308. mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
  1309. mic_cfg->flags = 0;
  1310. memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
  1311. return 0;
  1312. }
static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	/* pick the mic combo table matching the channel type */
	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;
	}

	/* channel type enum maps to the number of channels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		/* route the selected input mic to this output channel */
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}
/*
 * Fill the DMA id for host and link. In case of a passthrough
 * pipeline, both the host and the link copier sit in the same
 * pipeline, so copy the link and host params based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		/*
		 * A passthrough pipe is shared by host and link copiers:
		 * update only the DMA fields that belong to this module's
		 * device type, then copy the common stream parameters.
		 */
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		/* non-passthrough pipe: take over all params wholesale */
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}
  1396. /*
  1397. * The FE params are passed by hw_params of the DAI.
  1398. * On hw_params, the params are stored in Gateway module of the FE and we
  1399. * need to calculate the format in DSP module configuration, that
  1400. * conversion is done here
  1401. */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_res *res = &mconfig->module->resources[0];
	struct skl *skl = get_skl_ctx(dev);
	struct skl_module_fmt *format = NULL;
	u8 cfg_idx = mconfig->pipe->cur_config_idx;

	skl_tplg_fill_dma_id(mconfig, params);
	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

	/*
	 * With a module-list manifest (nr_modules != 0) formats are taken
	 * from topology, so nothing more to derive here — presumably the
	 * manifest data is authoritative; confirm against manifest parsing.
	 */
	if (skl->nr_modules)
		return 0;

	/* playback feeds the module input, capture reads its output */
	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->module->formats[0].inputs[0].fmt;
	else
		format = &mconfig->module->formats[0].outputs[0].fmt;

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	/* buffer size = bytes for 1 ms of audio: freq/1000 * ch * bytes/sample */
	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		res->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		res->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}
  1451. /*
  1452. * Query the module config for the FE DAI
  1453. * This is used to find the hw_params set for that DAI and apply to FE
  1454. * pipeline
  1455. */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			/* skip connected, powered sinks that are not SKL DSP widgets */
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink, dai->dev))
				continue;

			/* first sink carrying module private data wins */
			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			/* skip connected, powered sources that are not SKL DSP widgets */
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source, dai->dev))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	/* no module config found on any path */
	return NULL;
}
  1488. static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
  1489. struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
  1490. {
  1491. struct snd_soc_dapm_path *p;
  1492. struct skl_module_cfg *mconfig = NULL;
  1493. snd_soc_dapm_widget_for_each_source_path(w, p) {
  1494. if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
  1495. if (p->connect &&
  1496. (p->sink->id == snd_soc_dapm_aif_out) &&
  1497. p->source->priv) {
  1498. mconfig = p->source->priv;
  1499. return mconfig;
  1500. }
  1501. mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
  1502. if (mconfig)
  1503. return mconfig;
  1504. }
  1505. }
  1506. return mconfig;
  1507. }
  1508. static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
  1509. struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
  1510. {
  1511. struct snd_soc_dapm_path *p;
  1512. struct skl_module_cfg *mconfig = NULL;
  1513. snd_soc_dapm_widget_for_each_sink_path(w, p) {
  1514. if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
  1515. if (p->connect &&
  1516. (p->source->id == snd_soc_dapm_aif_in) &&
  1517. p->sink->priv) {
  1518. mconfig = p->sink->priv;
  1519. return mconfig;
  1520. }
  1521. mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
  1522. if (mconfig)
  1523. return mconfig;
  1524. }
  1525. }
  1526. return mconfig;
  1527. }
  1528. struct skl_module_cfg *
  1529. skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
  1530. {
  1531. struct snd_soc_dapm_widget *w;
  1532. struct skl_module_cfg *mconfig;
  1533. if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
  1534. w = dai->playback_widget;
  1535. mconfig = skl_get_mconfig_pb_cpr(dai, w);
  1536. } else {
  1537. w = dai->capture_widget;
  1538. mconfig = skl_get_mconfig_cap_cpr(dai, w);
  1539. }
  1540. return mconfig;
  1541. }
  1542. static u8 skl_tplg_be_link_type(int dev_type)
  1543. {
  1544. int ret;
  1545. switch (dev_type) {
  1546. case SKL_DEVICE_BT:
  1547. ret = NHLT_LINK_SSP;
  1548. break;
  1549. case SKL_DEVICE_DMIC:
  1550. ret = NHLT_LINK_DMIC;
  1551. break;
  1552. case SKL_DEVICE_I2S:
  1553. ret = NHLT_LINK_SSP;
  1554. break;
  1555. case SKL_DEVICE_HDALINK:
  1556. ret = NHLT_LINK_HDA;
  1557. break;
  1558. default:
  1559. ret = NHLT_LINK_INVALID;
  1560. break;
  1561. }
  1562. return ret;
  1563. }
  1564. /*
  1565. * Fill the BE gateway parameters
  1566. * The BE gateway expects a blob of parameters which are kept in the ACPI
  1567. * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
  1568. * The port can have multiple settings so pick based on the PCM
  1569. * parameters
  1570. */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	/* HDA links take no NHLT endpoint blob, nothing more to do */
	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id*/
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream,
					dev_type);
	if (cfg) {
		/* hand the NHLT capability blob straight to the module */
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		/* no matching endpoint in NHLT: log enough to diagnose */
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				 params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}
/*
 * Walk source paths of @w and program BE pipe params into every reachable
 * SKL DSP widget. Returns the result of the last path handled, or -EIO
 * if the widget has no source paths at all; errors abort the walk.
 */
static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;		/* returned unchanged when there are no paths */

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
				p->source->priv) {
			/* connected SKL DSP widget found: fill its params */
			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			/* keep recursing upstream towards the DSP */
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}
/*
 * Walk sink paths of @w and program BE pipe params into every reachable
 * SKL DSP widget. Returns the result of the last path handled, or -EIO
 * if the widget has no sink paths at all; errors abort the walk.
 */
static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
		struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;		/* returned unchanged when there are no paths */

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
				p->sink->priv) {
			/* connected SKL DSP widget found: fill its params */
			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			/* keep recursing downstream towards the DSP */
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}
/*
 * BE hw_params can carry source parameters (capture) or sink parameters
 * (playback). Based on the direction we need to walk either the source
 * list or the sink list and set the pipeline parameters.
 */
  1648. int skl_tplg_be_update_params(struct snd_soc_dai *dai,
  1649. struct skl_pipe_params *params)
  1650. {
  1651. struct snd_soc_dapm_widget *w;
  1652. if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
  1653. w = dai->playback_widget;
  1654. return skl_tplg_be_set_src_pipe_params(dai, w, params);
  1655. } else {
  1656. w = dai->capture_widget;
  1657. return skl_tplg_be_set_sink_pipe_params(dai, w, params);
  1658. }
  1659. return 0;
  1660. }
/* DAPM widget event handlers bound by event_type in the topology binary */
static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};
/* get/put handlers for byte-TLV kcontrols defined by topology */
static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
	skl_tplg_tlv_control_set},
};
/* get/put handlers for the DMIC mic-select enum kcontrol */
static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
	{
		.id = SKL_CONTROL_TYPE_MIC_SELECT,
		.get = skl_tplg_mic_control_get,
		.put = skl_tplg_mic_control_set,
	},
};
  1677. static int skl_tplg_fill_pipe_cfg(struct device *dev,
  1678. struct skl_pipe *pipe, u32 tkn,
  1679. u32 tkn_val, int conf_idx, int dir)
  1680. {
  1681. struct skl_pipe_fmt *fmt;
  1682. struct skl_path_config *config;
  1683. switch (dir) {
  1684. case SKL_DIR_IN:
  1685. fmt = &pipe->configs[conf_idx].in_fmt;
  1686. break;
  1687. case SKL_DIR_OUT:
  1688. fmt = &pipe->configs[conf_idx].out_fmt;
  1689. break;
  1690. default:
  1691. dev_err(dev, "Invalid direction: %d\n", dir);
  1692. return -EINVAL;
  1693. }
  1694. config = &pipe->configs[conf_idx];
  1695. switch (tkn) {
  1696. case SKL_TKN_U32_CFG_FREQ:
  1697. fmt->freq = tkn_val;
  1698. break;
  1699. case SKL_TKN_U8_CFG_CHAN:
  1700. fmt->channels = tkn_val;
  1701. break;
  1702. case SKL_TKN_U8_CFG_BPS:
  1703. fmt->bps = tkn_val;
  1704. break;
  1705. case SKL_TKN_U32_PATH_MEM_PGS:
  1706. config->mem_pages = tkn_val;
  1707. break;
  1708. default:
  1709. dev_err(dev, "Invalid token config: %d\n", tkn);
  1710. return -EINVAL;
  1711. }
  1712. return 0;
  1713. }
  1714. static int skl_tplg_fill_pipe_tkn(struct device *dev,
  1715. struct skl_pipe *pipe, u32 tkn,
  1716. u32 tkn_val)
  1717. {
  1718. switch (tkn) {
  1719. case SKL_TKN_U32_PIPE_CONN_TYPE:
  1720. pipe->conn_type = tkn_val;
  1721. break;
  1722. case SKL_TKN_U32_PIPE_PRIORITY:
  1723. pipe->pipe_priority = tkn_val;
  1724. break;
  1725. case SKL_TKN_U32_PIPE_MEM_PGS:
  1726. pipe->memory_pages = tkn_val;
  1727. break;
  1728. case SKL_TKN_U32_PMODE:
  1729. pipe->lp_mode = tkn_val;
  1730. break;
  1731. case SKL_TKN_U32_PIPE_DIRECTION:
  1732. pipe->direction = tkn_val;
  1733. break;
  1734. case SKL_TKN_U32_NUM_CONFIGS:
  1735. pipe->nr_cfgs = tkn_val;
  1736. break;
  1737. default:
  1738. dev_err(dev, "Token not handled %d\n", tkn);
  1739. return -EINVAL;
  1740. }
  1741. return 0;
  1742. }
  1743. /*
  1744. * Add pipeline by parsing the relevant tokens
  1745. * Return an existing pipe if the pipe already exists.
  1746. */
static int skl_tplg_add_pipe(struct device *dev,
			struct skl_module_cfg *mconfig, struct skl *skl,
			struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	/* reuse an already-registered pipe with the same id */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
			/* caller treats -EEXIST as "pipe already present" */
			return -EEXIST;
		}
	}

	/* device-managed allocations: freed automatically on driver detach */
	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;

	return 0;
}
  1778. static int skl_tplg_get_uuid(struct device *dev, u8 *guid,
  1779. struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
  1780. {
  1781. if (uuid_tkn->token == SKL_TKN_UUID) {
  1782. memcpy(guid, &uuid_tkn->uuid, 16);
  1783. return 0;
  1784. }
  1785. dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);
  1786. return -EINVAL;
  1787. }
  1788. static int skl_tplg_fill_pin(struct device *dev,
  1789. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  1790. struct skl_module_pin *m_pin,
  1791. int pin_index)
  1792. {
  1793. int ret;
  1794. switch (tkn_elem->token) {
  1795. case SKL_TKN_U32_PIN_MOD_ID:
  1796. m_pin[pin_index].id.module_id = tkn_elem->value;
  1797. break;
  1798. case SKL_TKN_U32_PIN_INST_ID:
  1799. m_pin[pin_index].id.instance_id = tkn_elem->value;
  1800. break;
  1801. case SKL_TKN_UUID:
  1802. ret = skl_tplg_get_uuid(dev, m_pin[pin_index].id.mod_uuid.b,
  1803. (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
  1804. if (ret < 0)
  1805. return ret;
  1806. break;
  1807. default:
  1808. dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
  1809. return -EINVAL;
  1810. }
  1811. return 0;
  1812. }
  1813. /*
  1814. * Parse for pin config specific tokens to fill up the
  1815. * module private data
  1816. */
  1817. static int skl_tplg_fill_pins_info(struct device *dev,
  1818. struct skl_module_cfg *mconfig,
  1819. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  1820. int dir, int pin_count)
  1821. {
  1822. int ret;
  1823. struct skl_module_pin *m_pin;
  1824. switch (dir) {
  1825. case SKL_DIR_IN:
  1826. m_pin = mconfig->m_in_pin;
  1827. break;
  1828. case SKL_DIR_OUT:
  1829. m_pin = mconfig->m_out_pin;
  1830. break;
  1831. default:
  1832. dev_err(dev, "Invalid direction value\n");
  1833. return -EINVAL;
  1834. }
  1835. ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
  1836. if (ret < 0)
  1837. return ret;
  1838. m_pin[pin_count].in_use = false;
  1839. m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
  1840. return 0;
  1841. }
  1842. /*
  1843. * Fill up input/output module config format based
  1844. * on the direction
  1845. */
/*
 * Store one audio-format token (@tkn/@value) into @dst_fmt.
 * Returns 0 on success, -EINVAL for an unknown token.
 */
static int skl_tplg_fill_fmt(struct device *dev,
		struct skl_module_fmt *dst_fmt,
		u32 tkn, u32 value)
{
	switch (tkn) {
	case SKL_TKN_U32_FMT_CH:
		dst_fmt->channels = value;
		break;

	case SKL_TKN_U32_FMT_FREQ:
		dst_fmt->s_freq = value;
		break;

	case SKL_TKN_U32_FMT_BIT_DEPTH:
		dst_fmt->bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
		dst_fmt->valid_bit_depth = value;
		break;

	case SKL_TKN_U32_FMT_CH_CONFIG:
		dst_fmt->ch_cfg = value;
		break;

	case SKL_TKN_U32_FMT_INTERLEAVE:
		dst_fmt->interleaving_style = value;
		break;

	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
		dst_fmt->sample_type = value;
		break;

	case SKL_TKN_U32_FMT_CH_MAP:
		dst_fmt->ch_map = value;
		break;

	default:
		dev_err(dev, "Invalid token %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}
  1881. static int skl_tplg_widget_fill_fmt(struct device *dev,
  1882. struct skl_module_iface *fmt,
  1883. u32 tkn, u32 val, u32 dir, int fmt_idx)
  1884. {
  1885. struct skl_module_fmt *dst_fmt;
  1886. if (!fmt)
  1887. return -EINVAL;
  1888. switch (dir) {
  1889. case SKL_DIR_IN:
  1890. dst_fmt = &fmt->inputs[fmt_idx].fmt;
  1891. break;
  1892. case SKL_DIR_OUT:
  1893. dst_fmt = &fmt->outputs[fmt_idx].fmt;
  1894. break;
  1895. default:
  1896. dev_err(dev, "Invalid direction: %d\n", dir);
  1897. return -EINVAL;
  1898. }
  1899. return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
  1900. }
  1901. static void skl_tplg_fill_pin_dynamic_val(
  1902. struct skl_module_pin *mpin, u32 pin_count, u32 value)
  1903. {
  1904. int i;
  1905. for (i = 0; i < pin_count; i++)
  1906. mpin[i].is_dynamic = value;
  1907. }
  1908. /*
  1909. * Resource table in the manifest has pin specific resources
  1910. * like pin and pin buffer size
  1911. */
static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_res *res, int pin_idx, int dir)
{
	struct skl_module_pin_resources *m_pin;

	/* pick the pin-resource slot for the given direction */
	switch (dir) {
	case SKL_DIR_IN:
		m_pin = &res->input[pin_idx];
		break;

	case SKL_DIR_OUT:
		m_pin = &res->output[pin_idx];
		break;

	default:
		dev_err(dev, "Invalid pin direction: %d\n", dir);
		return -EINVAL;
	}

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_RES_PIN_ID:
		m_pin->pin_index = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_PIN_BUF:
		m_pin->buf_size = tkn_elem->value;
		break;

	default:
		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
		return -EINVAL;
	}

	return 0;
}
  1941. /*
  1942. * Fill module specific resources from the manifest's resource
  1943. * table like CPS, DMA size, mem_pages.
  1944. */
/*
 * Store one module-resource token into @res; pin-specific resource
 * tokens are delegated to skl_tplg_manifest_pin_res_tkn().
 * Returns the number of tokens consumed (1) on success, negative on error.
 */
static int skl_tplg_fill_res_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_res *res,
		int pin_idx, int dir)
{
	int ret, tkn_count = 0;

	if (!res)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_CPS:
		res->cps = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_DMA_SIZE:
		res->dma_buffer_size = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_CPC:
		res->cpc = tkn_elem->value;
		break;

	case SKL_TKN_U32_MEM_PAGES:
		res->is_pages = tkn_elem->value;
		break;

	case SKL_TKN_U32_OBS:
		res->obs = tkn_elem->value;
		break;

	case SKL_TKN_U32_IBS:
		res->ibs = tkn_elem->value;
		break;

	/* legacy token: stores into the same field as SKL_TKN_MM_U32_CPS */
	case SKL_TKN_U32_MAX_MCPS:
		res->cps = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_RES_PIN_ID:
	case SKL_TKN_MM_U32_PIN_BUF:
		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
						    pin_idx, dir);
		if (ret < 0)
			return ret;
		break;

	default:
		dev_err(dev, "Not a res type token: %d", tkn_elem->token);
		return -EINVAL;
	}

	tkn_count++;

	return tkn_count;
}
  1989. /*
  1990. * Parse tokens to fill up the module private data
  1991. */
  1992. static int skl_tplg_get_token(struct device *dev,
  1993. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  1994. struct skl *skl, struct skl_module_cfg *mconfig)
  1995. {
  1996. int tkn_count = 0;
  1997. int ret;
  1998. static int is_pipe_exists;
  1999. static int pin_index, dir, conf_idx;
  2000. struct skl_module_iface *iface = NULL;
  2001. struct skl_module_res *res = NULL;
  2002. int res_idx = mconfig->res_idx;
  2003. int fmt_idx = mconfig->fmt_idx;
  2004. /*
  2005. * If the manifest structure contains no modules, fill all
  2006. * the module data to 0th index.
  2007. * res_idx and fmt_idx are default set to 0.
  2008. */
  2009. if (skl->nr_modules == 0) {
  2010. res = &mconfig->module->resources[res_idx];
  2011. iface = &mconfig->module->formats[fmt_idx];
  2012. }
  2013. if (tkn_elem->token > SKL_TKN_MAX)
  2014. return -EINVAL;
  2015. switch (tkn_elem->token) {
  2016. case SKL_TKN_U8_IN_QUEUE_COUNT:
  2017. mconfig->module->max_input_pins = tkn_elem->value;
  2018. break;
  2019. case SKL_TKN_U8_OUT_QUEUE_COUNT:
  2020. mconfig->module->max_output_pins = tkn_elem->value;
  2021. break;
  2022. case SKL_TKN_U8_DYN_IN_PIN:
  2023. if (!mconfig->m_in_pin)
  2024. mconfig->m_in_pin =
  2025. devm_kcalloc(dev, MAX_IN_QUEUE,
  2026. sizeof(*mconfig->m_in_pin),
  2027. GFP_KERNEL);
  2028. if (!mconfig->m_in_pin)
  2029. return -ENOMEM;
  2030. skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
  2031. tkn_elem->value);
  2032. break;
  2033. case SKL_TKN_U8_DYN_OUT_PIN:
  2034. if (!mconfig->m_out_pin)
  2035. mconfig->m_out_pin =
  2036. devm_kcalloc(dev, MAX_IN_QUEUE,
  2037. sizeof(*mconfig->m_in_pin),
  2038. GFP_KERNEL);
  2039. if (!mconfig->m_out_pin)
  2040. return -ENOMEM;
  2041. skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
  2042. tkn_elem->value);
  2043. break;
  2044. case SKL_TKN_U8_TIME_SLOT:
  2045. mconfig->time_slot = tkn_elem->value;
  2046. break;
  2047. case SKL_TKN_U8_CORE_ID:
  2048. mconfig->core_id = tkn_elem->value;
  2049. case SKL_TKN_U8_MOD_TYPE:
  2050. mconfig->m_type = tkn_elem->value;
  2051. break;
  2052. case SKL_TKN_U8_DEV_TYPE:
  2053. mconfig->dev_type = tkn_elem->value;
  2054. break;
  2055. case SKL_TKN_U8_HW_CONN_TYPE:
  2056. mconfig->hw_conn_type = tkn_elem->value;
  2057. break;
  2058. case SKL_TKN_U16_MOD_INST_ID:
  2059. mconfig->id.instance_id =
  2060. tkn_elem->value;
  2061. break;
  2062. case SKL_TKN_U32_MEM_PAGES:
  2063. case SKL_TKN_U32_MAX_MCPS:
  2064. case SKL_TKN_U32_OBS:
  2065. case SKL_TKN_U32_IBS:
  2066. ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
  2067. if (ret < 0)
  2068. return ret;
  2069. break;
  2070. case SKL_TKN_U32_VBUS_ID:
  2071. mconfig->vbus_id = tkn_elem->value;
  2072. break;
  2073. case SKL_TKN_U32_PARAMS_FIXUP:
  2074. mconfig->params_fixup = tkn_elem->value;
  2075. break;
  2076. case SKL_TKN_U32_CONVERTER:
  2077. mconfig->converter = tkn_elem->value;
  2078. break;
  2079. case SKL_TKN_U32_D0I3_CAPS:
  2080. mconfig->d0i3_caps = tkn_elem->value;
  2081. break;
  2082. case SKL_TKN_U32_PIPE_ID:
  2083. ret = skl_tplg_add_pipe(dev,
  2084. mconfig, skl, tkn_elem);
  2085. if (ret < 0) {
  2086. if (ret == -EEXIST) {
  2087. is_pipe_exists = 1;
  2088. break;
  2089. }
  2090. return is_pipe_exists;
  2091. }
  2092. break;
  2093. case SKL_TKN_U32_PIPE_CONFIG_ID:
  2094. conf_idx = tkn_elem->value;
  2095. break;
  2096. case SKL_TKN_U32_PIPE_CONN_TYPE:
  2097. case SKL_TKN_U32_PIPE_PRIORITY:
  2098. case SKL_TKN_U32_PIPE_MEM_PGS:
  2099. case SKL_TKN_U32_PMODE:
  2100. case SKL_TKN_U32_PIPE_DIRECTION:
  2101. case SKL_TKN_U32_NUM_CONFIGS:
  2102. if (is_pipe_exists) {
  2103. ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
  2104. tkn_elem->token, tkn_elem->value);
  2105. if (ret < 0)
  2106. return ret;
  2107. }
  2108. break;
  2109. case SKL_TKN_U32_PATH_MEM_PGS:
  2110. case SKL_TKN_U32_CFG_FREQ:
  2111. case SKL_TKN_U8_CFG_CHAN:
  2112. case SKL_TKN_U8_CFG_BPS:
  2113. if (mconfig->pipe->nr_cfgs) {
  2114. ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
  2115. tkn_elem->token, tkn_elem->value,
  2116. conf_idx, dir);
  2117. if (ret < 0)
  2118. return ret;
  2119. }
  2120. break;
  2121. case SKL_TKN_CFG_MOD_RES_ID:
  2122. mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
  2123. break;
  2124. case SKL_TKN_CFG_MOD_FMT_ID:
  2125. mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
  2126. break;
  2127. /*
  2128. * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
  2129. * direction and the pin count. The first four bits represent
  2130. * direction and next four the pin count.
  2131. */
  2132. case SKL_TKN_U32_DIR_PIN_COUNT:
  2133. dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
  2134. pin_index = (tkn_elem->value &
  2135. SKL_PIN_COUNT_MASK) >> 4;
  2136. break;
  2137. case SKL_TKN_U32_FMT_CH:
  2138. case SKL_TKN_U32_FMT_FREQ:
  2139. case SKL_TKN_U32_FMT_BIT_DEPTH:
  2140. case SKL_TKN_U32_FMT_SAMPLE_SIZE:
  2141. case SKL_TKN_U32_FMT_CH_CONFIG:
  2142. case SKL_TKN_U32_FMT_INTERLEAVE:
  2143. case SKL_TKN_U32_FMT_SAMPLE_TYPE:
  2144. case SKL_TKN_U32_FMT_CH_MAP:
  2145. ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
  2146. tkn_elem->value, dir, pin_index);
  2147. if (ret < 0)
  2148. return ret;
  2149. break;
  2150. case SKL_TKN_U32_PIN_MOD_ID:
  2151. case SKL_TKN_U32_PIN_INST_ID:
  2152. case SKL_TKN_UUID:
  2153. ret = skl_tplg_fill_pins_info(dev,
  2154. mconfig, tkn_elem, dir,
  2155. pin_index);
  2156. if (ret < 0)
  2157. return ret;
  2158. break;
  2159. case SKL_TKN_U32_CAPS_SIZE:
  2160. mconfig->formats_config.caps_size =
  2161. tkn_elem->value;
  2162. break;
  2163. case SKL_TKN_U32_CAPS_SET_PARAMS:
  2164. mconfig->formats_config.set_params =
  2165. tkn_elem->value;
  2166. break;
  2167. case SKL_TKN_U32_CAPS_PARAMS_ID:
  2168. mconfig->formats_config.param_id =
  2169. tkn_elem->value;
  2170. break;
  2171. case SKL_TKN_U32_PROC_DOMAIN:
  2172. mconfig->domain =
  2173. tkn_elem->value;
  2174. break;
  2175. case SKL_TKN_U32_DMA_BUF_SIZE:
  2176. mconfig->dma_buffer_size = tkn_elem->value;
  2177. break;
  2178. case SKL_TKN_U8_IN_PIN_TYPE:
  2179. case SKL_TKN_U8_OUT_PIN_TYPE:
  2180. case SKL_TKN_U8_CONN_TYPE:
  2181. break;
  2182. default:
  2183. dev_err(dev, "Token %d not handled\n",
  2184. tkn_elem->token);
  2185. return -EINVAL;
  2186. }
  2187. tkn_count++;
  2188. return tkn_count;
  2189. }
  2190. /*
  2191. * Parse the vendor array for specific tokens to construct
  2192. * module private data
  2193. */
/*
 * Walk the private-data blob of @block_size bytes, dispatching each vendor
 * array's elements to skl_tplg_get_token(). The first UUID tuple is the
 * module GUID itself. Returns the byte offset consumed, or a negative error.
 */
static int skl_tplg_get_tokens(struct device *dev,
		char *pvt_data, struct skl *skl,
		struct skl_module_cfg *mconfig, int block_size)
{
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	bool is_module_guid = true;	/* first UUID tuple = module GUID */

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		/* next vendor array starts at the current byte offset */
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);

		off += array->size;

		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			/*
			 * NOTE(review): tuple_size is not advanced for string
			 * tuples, only off is — looks like string tuples are
			 * simply not expected in well-formed skl topology.
			 */
			dev_warn(dev, "no string tokens expected for skl tplg\n");
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			if (is_module_guid) {
				ret = skl_tplg_get_uuid(dev, mconfig->guid,
						array->uuid);
				is_module_guid = false;
			} else {
				ret = skl_tplg_get_token(dev, array->value, skl,
						mconfig);
			}

			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);
			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		/* consume all value elements of this array */
		while (tkn_count <= (array->num_elems - 1)) {
			ret = skl_tplg_get_token(dev, tkn_elem,
					skl, mconfig);

			if (ret < 0)
				return ret;

			/* get_token() returns the number of tokens consumed */
			tkn_count = tkn_count + ret;
			tkn_elem++;
		}

		tuple_size += tkn_count * sizeof(*tkn_elem);
	}

	return off;
}
/*
 * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size
 */
  2246. static int skl_tplg_get_desc_blocks(struct device *dev,
  2247. struct snd_soc_tplg_vendor_array *array)
  2248. {
  2249. struct snd_soc_tplg_vendor_value_elem *tkn_elem;
  2250. tkn_elem = array->value;
  2251. switch (tkn_elem->token) {
  2252. case SKL_TKN_U8_NUM_BLOCKS:
  2253. case SKL_TKN_U8_BLOCK_TYPE:
  2254. case SKL_TKN_U16_BLOCK_SIZE:
  2255. return tkn_elem->value;
  2256. default:
  2257. dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
  2258. break;
  2259. }
  2260. return -EINVAL;
  2261. }
  2262. /* Functions to parse private data from configuration file format v4 */
  2263. /*
  2264. * Add pipeline from topology binary into driver pipeline list
  2265. *
  2266. * If already added we return that instance
  2267. * Otherwise we create a new instance and add into driver list
  2268. */
static int skl_tplg_add_pipe_v4(struct device *dev,
			struct skl_module_cfg *mconfig, struct skl *skl,
			struct skl_dfw_v4_pipe *dfw_pipe)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	/* reuse an already-registered pipe with the same id */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
			mconfig->pipe = ppl->pipe;
			/* unlike skl_tplg_add_pipe(), reuse returns 0 here */
			return 0;
		}
	}

	/* device-managed allocations: freed automatically on driver detach */
	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->ppl_id = dfw_pipe->pipe_id;
	pipe->memory_pages = dfw_pipe->memory_pages;
	pipe->pipe_priority = dfw_pipe->pipe_priority;
	pipe->conn_type = dfw_pipe->conn_type;
	pipe->state = SKL_PIPE_INVALID;
	pipe->p_params = params;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;

	return 0;
}
  2303. static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
  2304. struct skl_module_pin *m_pin,
  2305. bool is_dynamic, int max_pin)
  2306. {
  2307. int i;
  2308. for (i = 0; i < max_pin; i++) {
  2309. m_pin[i].id.module_id = dfw_pin[i].module_id;
  2310. m_pin[i].id.instance_id = dfw_pin[i].instance_id;
  2311. m_pin[i].in_use = false;
  2312. m_pin[i].is_dynamic = is_dynamic;
  2313. m_pin[i].pin_state = SKL_PIN_UNBIND;
  2314. }
  2315. }
  2316. static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
  2317. struct skl_dfw_v4_module_fmt *src_fmt,
  2318. int pins)
  2319. {
  2320. int i;
  2321. for (i = 0; i < pins; i++) {
  2322. dst_fmt[i].fmt.channels = src_fmt[i].channels;
  2323. dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
  2324. dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
  2325. dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
  2326. dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
  2327. dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
  2328. dst_fmt[i].fmt.interleaving_style =
  2329. src_fmt[i].interleaving_style;
  2330. dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
  2331. }
  2332. }
/*
 * skl_tplg_get_pvt_data_v4 - parse a legacy (v4) widget private-data blob
 *
 * Fills @mconfig (ids, resources, formats, pins, caps) straight from the
 * fixed-layout struct skl_dfw_v4_module at the start of the widget's
 * private data. Returns 0 on success or a negative errno (bad GUID or
 * allocation failure; all allocations are device-managed).
 */
static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
				    struct skl *skl, struct device *dev,
				    struct skl_module_cfg *mconfig)
{
	struct skl_dfw_v4_module *dfw =
		(struct skl_dfw_v4_module *)tplg_w->priv.data;
	int ret;

	dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");

	ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
	if (ret)
		return ret;
	/* real module id is resolved later, when the module is used */
	mconfig->id.module_id = -1;
	mconfig->id.instance_id = dfw->instance_id;
	/* v4 blobs describe exactly one resource/format set: index 0 */
	mconfig->module->resources[0].cps = dfw->max_mcps;
	mconfig->module->resources[0].ibs = dfw->ibs;
	mconfig->module->resources[0].obs = dfw->obs;
	mconfig->core_id = dfw->core_id;
	mconfig->module->max_input_pins = dfw->max_in_queue;
	mconfig->module->max_output_pins = dfw->max_out_queue;
	mconfig->module->loadable = dfw->is_loadable;
	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
			     MAX_IN_QUEUE);
	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
			     MAX_OUT_QUEUE);
	mconfig->params_fixup = dfw->params_fixup;
	mconfig->converter = dfw->converter;
	mconfig->m_type = dfw->module_type;
	mconfig->vbus_id = dfw->vbus_id;
	mconfig->module->resources[0].is_pages = dfw->mem_pages;

	ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
	if (ret)
		return ret;

	mconfig->dev_type = dfw->dev_type;
	mconfig->hw_conn_type = dfw->hw_conn_type;
	mconfig->time_slot = dfw->time_slot;
	mconfig->formats_config.caps_size = dfw->caps.caps_size;

	/* pin arrays are always sized for the maximum queue depth */
	mconfig->m_in_pin = devm_kcalloc(dev,
			MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
			GFP_KERNEL);
	if (!mconfig->m_in_pin)
		return -ENOMEM;

	mconfig->m_out_pin = devm_kcalloc(dev,
			MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
			GFP_KERNEL);
	if (!mconfig->m_out_pin)
		return -ENOMEM;

	skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
				    dfw->is_dynamic_in_pin,
				    mconfig->module->max_input_pins);
	skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
				    dfw->is_dynamic_out_pin,
				    mconfig->module->max_output_pins);

	/* copy module-specific init params (caps) out of the blob, if any */
	if (mconfig->formats_config.caps_size) {
		mconfig->formats_config.set_params = dfw->caps.set_params;
		mconfig->formats_config.param_id = dfw->caps.param_id;
		mconfig->formats_config.caps =
			devm_kzalloc(dev, mconfig->formats_config.caps_size,
				     GFP_KERNEL);
		if (!mconfig->formats_config.caps)
			return -ENOMEM;
		memcpy(mconfig->formats_config.caps, dfw->caps.caps,
		       dfw->caps.caps_size);
	}

	return 0;
}
  2398. /*
  2399. * Parse the private data for the token and corresponding value.
  2400. * The private data can have multiple data blocks. So, a data block
  2401. * is preceded by a descriptor for number of blocks and a descriptor
 * for the type and size of the succeeding data block.
  2403. */
/*
 * Walk a widget's private data: a NUM_DATA_BLOCKS descriptor followed by
 * (type, size) descriptor pairs and data blocks. Tuple blocks are token
 * parsed; binary blocks are copied into mconfig->formats_config.caps.
 * Returns 0 on success or a negative errno from a sub-parser.
 */
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
				 struct skl *skl, struct device *dev,
				 struct skl_module_cfg *mconfig)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/*
	 * v4 configuration files have a valid UUID at the start of
	 * the widget's private data.
	 */
	if (uuid_is_valid((char *)tplg_w->priv.data))
		return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	/* off tracks the running byte offset into priv.data */
	off += array->size;
	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		data = (tplg_w->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_tokens(dev, data,
					skl, mconfig, block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			/*
			 * binary block: raw caps payload; caps buffer and
			 * caps_size were set while parsing earlier tuples
			 */
			if (mconfig->formats_config.caps_size > 0)
				memcpy(mconfig->formats_config.caps, data,
					mconfig->formats_config.caps_size);
			--num_blocks;
			ret = mconfig->formats_config.caps_size;
		}
		off += ret;
	}

	return 0;
}
  2461. static void skl_clear_pin_config(struct snd_soc_component *component,
  2462. struct snd_soc_dapm_widget *w)
  2463. {
  2464. int i;
  2465. struct skl_module_cfg *mconfig;
  2466. struct skl_pipe *pipe;
  2467. if (!strncmp(w->dapm->component->name, component->name,
  2468. strlen(component->name))) {
  2469. mconfig = w->priv;
  2470. pipe = mconfig->pipe;
  2471. for (i = 0; i < mconfig->module->max_input_pins; i++) {
  2472. mconfig->m_in_pin[i].in_use = false;
  2473. mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
  2474. }
  2475. for (i = 0; i < mconfig->module->max_output_pins; i++) {
  2476. mconfig->m_out_pin[i].in_use = false;
  2477. mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
  2478. }
  2479. pipe->state = SKL_PIPE_INVALID;
  2480. mconfig->m_state = SKL_MODULE_UNINIT;
  2481. }
  2482. }
/*
 * Reset resource accounting and all per-widget pin/pipe state so the
 * topology can be re-initialized (e.g. after a DSP power cycle).
 * No-op until the card is fully instantiated.
 */
void skl_cleanup_resources(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct snd_soc_component *soc_component = skl->component;
	struct snd_soc_dapm_widget *w;
	struct snd_soc_card *card;

	if (soc_component == NULL)
		return;

	card = soc_component->card;
	if (!card || !card->instantiated)
		return;

	/* all pipelines are torn down: no memory or MCPS is in use */
	skl->resource.mem = 0;
	skl->resource.mcps = 0;

	list_for_each_entry(w, &card->widgets, list) {
		if (is_skl_dsp_widget_type(w, ctx->dev) && w->priv != NULL)
			skl_clear_pin_config(soc_component, w);
	}

	skl_clear_module_cnt(ctx->dsp);
}
  2502. /*
  2503. * Topology core widget load callback
  2504. *
  2505. * This is used to save the private data for each widget which gives
  2506. * information to the driver about module and pipeline parameters which DSP
  2507. * FW expects like ids, resource values, formats etc
  2508. */
static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
				struct snd_soc_dapm_widget *w,
				struct snd_soc_tplg_dapm_widget *tplg_w)
{
	int ret;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct skl *skl = bus_to_skl(bus);
	struct skl_module_cfg *mconfig;

	/* widgets without private data only need event-handler binding */
	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);

	if (!mconfig)
		return -ENOMEM;

	/*
	 * nr_modules == 0 means the manifest did not pre-allocate module
	 * descriptors, so each widget carries its own skl_module.
	 */
	if (skl->nr_modules == 0) {
		mconfig->module = devm_kzalloc(bus->dev,
				sizeof(*mconfig->module), GFP_KERNEL);
		if (!mconfig->module)
			return -ENOMEM;
	}

	w->priv = mconfig;

	/*
	 * module binary can be loaded later, so set it to query when
	 * module is loaded for a use case
	 */
	mconfig->id.module_id = -1;

	/* Parse private data for tuples */
	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
	if (ret < 0)
		return ret;

	skl_debug_init_module(skl->debugfs, w, mconfig);

bind_event:
	if (tplg_w->event_type == 0) {
		dev_dbg(bus->dev, "ASoC: No event handler required\n");
		return 0;
	}

	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
					ARRAY_SIZE(skl_tplg_widget_ops),
					tplg_w->event_type);

	if (ret) {
		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
					__func__, tplg_w->event_type);
		return -EINVAL;
	}

	return 0;
}
  2554. static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
  2555. struct snd_soc_tplg_bytes_control *bc)
  2556. {
  2557. struct skl_algo_data *ac;
  2558. struct skl_dfw_algo_data *dfw_ac =
  2559. (struct skl_dfw_algo_data *)bc->priv.data;
  2560. ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
  2561. if (!ac)
  2562. return -ENOMEM;
  2563. /* Fill private data */
  2564. ac->max = dfw_ac->max;
  2565. ac->param_id = dfw_ac->param_id;
  2566. ac->set_params = dfw_ac->set_params;
  2567. ac->size = dfw_ac->max;
  2568. if (ac->max) {
  2569. ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
  2570. if (!ac->params)
  2571. return -ENOMEM;
  2572. memcpy(ac->params, dfw_ac->params, ac->max);
  2573. }
  2574. be->dobj.private = ac;
  2575. return 0;
  2576. }
  2577. static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
  2578. struct snd_soc_tplg_enum_control *ec)
  2579. {
  2580. void *data;
  2581. if (ec->priv.size) {
  2582. data = devm_kzalloc(dev, sizeof(ec->priv.size), GFP_KERNEL);
  2583. if (!data)
  2584. return -ENOMEM;
  2585. memcpy(data, ec->priv.data, ec->priv.size);
  2586. se->dobj.private = data;
  2587. }
  2588. return 0;
  2589. }
/*
 * Topology core control load callback: attach SKL private data to byte
 * (TLV) and enum controls so their get/put handlers can reach the DSP
 * parameters. Controls of other kinds are accepted without private data.
 */
static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
				 int index,
				 struct snd_kcontrol_new *kctl,
				 struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct soc_bytes_ext *sb;
	struct snd_soc_tplg_bytes_control *tplg_bc;
	struct snd_soc_tplg_enum_control *tplg_ec;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct soc_enum *se;

	switch (hdr->ops.info) {
	case SND_SOC_TPLG_CTL_BYTES:
		tplg_bc = container_of(hdr,
				struct snd_soc_tplg_bytes_control, hdr);
		/* only TLV byte controls carry algo private data */
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)kctl->private_value;
			if (tplg_bc->priv.size)
				return skl_init_algo_data(
						bus->dev, sb, tplg_bc);
		}
		break;

	case SND_SOC_TPLG_CTL_ENUM:
		tplg_ec = container_of(hdr,
				struct snd_soc_tplg_enum_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
			se = (struct soc_enum *)kctl->private_value;
			if (tplg_ec->priv.size)
				return skl_init_enum_data(bus->dev, se,
						tplg_ec);
		}
		break;

	default:
		/* not an error: other control kinds just need no setup */
		dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
			hdr->ops.get, hdr->ops.put, hdr->ops.info);
		break;
	}

	return 0;
}
  2628. static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
  2629. struct snd_soc_tplg_vendor_string_elem *str_elem,
  2630. struct skl *skl)
  2631. {
  2632. int tkn_count = 0;
  2633. static int ref_count;
  2634. switch (str_elem->token) {
  2635. case SKL_TKN_STR_LIB_NAME:
  2636. if (ref_count > skl->skl_sst->lib_count - 1) {
  2637. ref_count = 0;
  2638. return -EINVAL;
  2639. }
  2640. strncpy(skl->skl_sst->lib_info[ref_count].name,
  2641. str_elem->string,
  2642. ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
  2643. ref_count++;
  2644. break;
  2645. default:
  2646. dev_err(dev, "Not a string token %d\n", str_elem->token);
  2647. break;
  2648. }
  2649. tkn_count++;
  2650. return tkn_count;
  2651. }
  2652. static int skl_tplg_get_str_tkn(struct device *dev,
  2653. struct snd_soc_tplg_vendor_array *array,
  2654. struct skl *skl)
  2655. {
  2656. int tkn_count = 0, ret;
  2657. struct snd_soc_tplg_vendor_string_elem *str_elem;
  2658. str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
  2659. while (tkn_count < array->num_elems) {
  2660. ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
  2661. str_elem++;
  2662. if (ret < 0)
  2663. return ret;
  2664. tkn_count = tkn_count + ret;
  2665. }
  2666. return tkn_count;
  2667. }
  2668. static int skl_tplg_manifest_fill_fmt(struct device *dev,
  2669. struct skl_module_iface *fmt,
  2670. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  2671. u32 dir, int fmt_idx)
  2672. {
  2673. struct skl_module_pin_fmt *dst_fmt;
  2674. struct skl_module_fmt *mod_fmt;
  2675. int ret;
  2676. if (!fmt)
  2677. return -EINVAL;
  2678. switch (dir) {
  2679. case SKL_DIR_IN:
  2680. dst_fmt = &fmt->inputs[fmt_idx];
  2681. break;
  2682. case SKL_DIR_OUT:
  2683. dst_fmt = &fmt->outputs[fmt_idx];
  2684. break;
  2685. default:
  2686. dev_err(dev, "Invalid direction: %d\n", dir);
  2687. return -EINVAL;
  2688. }
  2689. mod_fmt = &dst_fmt->fmt;
  2690. switch (tkn_elem->token) {
  2691. case SKL_TKN_MM_U32_INTF_PIN_ID:
  2692. dst_fmt->id = tkn_elem->value;
  2693. break;
  2694. default:
  2695. ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
  2696. tkn_elem->value);
  2697. if (ret < 0)
  2698. return ret;
  2699. break;
  2700. }
  2701. return 0;
  2702. }
/*
 * Store one module-level manifest token (pin types, queue counts,
 * resource/interface counts) into @mod. Returns 0, or -EINVAL if @mod
 * is missing or the token is not a module-info token.
 */
static int skl_tplg_fill_mod_info(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module *mod)
{
	if (!mod)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_PIN_TYPE:
		mod->input_pin_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_PIN_TYPE:
		mod->output_pin_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mod->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mod->max_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_MM_U8_NUM_RES:
		mod->nr_resources = tkn_elem->value;
		break;

	case SKL_TKN_MM_U8_NUM_INTF:
		mod->nr_interfaces = tkn_elem->value;
		break;

	default:
		dev_err(dev, "Invalid mod info token %d", tkn_elem->token);
		return -EINVAL;
	}

	return 0;
}
  2734. static int skl_tplg_get_int_tkn(struct device *dev,
  2735. struct snd_soc_tplg_vendor_value_elem *tkn_elem,
  2736. struct skl *skl)
  2737. {
  2738. int tkn_count = 0, ret, size;
  2739. static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
  2740. struct skl_module_res *res = NULL;
  2741. struct skl_module_iface *fmt = NULL;
  2742. struct skl_module *mod = NULL;
  2743. static struct skl_astate_param *astate_table;
  2744. static int astate_cfg_idx, count;
  2745. int i;
  2746. if (skl->modules) {
  2747. mod = skl->modules[mod_idx];
  2748. res = &mod->resources[res_val_idx];
  2749. fmt = &mod->formats[intf_val_idx];
  2750. }
  2751. switch (tkn_elem->token) {
  2752. case SKL_TKN_U32_LIB_COUNT:
  2753. skl->skl_sst->lib_count = tkn_elem->value;
  2754. break;
  2755. case SKL_TKN_U8_NUM_MOD:
  2756. skl->nr_modules = tkn_elem->value;
  2757. skl->modules = devm_kcalloc(dev, skl->nr_modules,
  2758. sizeof(*skl->modules), GFP_KERNEL);
  2759. if (!skl->modules)
  2760. return -ENOMEM;
  2761. for (i = 0; i < skl->nr_modules; i++) {
  2762. skl->modules[i] = devm_kzalloc(dev,
  2763. sizeof(struct skl_module), GFP_KERNEL);
  2764. if (!skl->modules[i])
  2765. return -ENOMEM;
  2766. }
  2767. break;
  2768. case SKL_TKN_MM_U8_MOD_IDX:
  2769. mod_idx = tkn_elem->value;
  2770. break;
  2771. case SKL_TKN_U32_ASTATE_COUNT:
  2772. if (astate_table != NULL) {
  2773. dev_err(dev, "More than one entry for A-State count");
  2774. return -EINVAL;
  2775. }
  2776. if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
  2777. dev_err(dev, "Invalid A-State count %d\n",
  2778. tkn_elem->value);
  2779. return -EINVAL;
  2780. }
  2781. size = tkn_elem->value * sizeof(struct skl_astate_param) +
  2782. sizeof(count);
  2783. skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
  2784. if (!skl->cfg.astate_cfg)
  2785. return -ENOMEM;
  2786. astate_table = skl->cfg.astate_cfg->astate_table;
  2787. count = skl->cfg.astate_cfg->count = tkn_elem->value;
  2788. break;
  2789. case SKL_TKN_U32_ASTATE_IDX:
  2790. if (tkn_elem->value >= count) {
  2791. dev_err(dev, "Invalid A-State index %d\n",
  2792. tkn_elem->value);
  2793. return -EINVAL;
  2794. }
  2795. astate_cfg_idx = tkn_elem->value;
  2796. break;
  2797. case SKL_TKN_U32_ASTATE_KCPS:
  2798. astate_table[astate_cfg_idx].kcps = tkn_elem->value;
  2799. break;
  2800. case SKL_TKN_U32_ASTATE_CLK_SRC:
  2801. astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
  2802. break;
  2803. case SKL_TKN_U8_IN_PIN_TYPE:
  2804. case SKL_TKN_U8_OUT_PIN_TYPE:
  2805. case SKL_TKN_U8_IN_QUEUE_COUNT:
  2806. case SKL_TKN_U8_OUT_QUEUE_COUNT:
  2807. case SKL_TKN_MM_U8_NUM_RES:
  2808. case SKL_TKN_MM_U8_NUM_INTF:
  2809. ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
  2810. if (ret < 0)
  2811. return ret;
  2812. break;
  2813. case SKL_TKN_U32_DIR_PIN_COUNT:
  2814. dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
  2815. pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
  2816. break;
  2817. case SKL_TKN_MM_U32_RES_ID:
  2818. if (!res)
  2819. return -EINVAL;
  2820. res->id = tkn_elem->value;
  2821. res_val_idx = tkn_elem->value;
  2822. break;
  2823. case SKL_TKN_MM_U32_FMT_ID:
  2824. if (!fmt)
  2825. return -EINVAL;
  2826. fmt->fmt_idx = tkn_elem->value;
  2827. intf_val_idx = tkn_elem->value;
  2828. break;
  2829. case SKL_TKN_MM_U32_CPS:
  2830. case SKL_TKN_MM_U32_DMA_SIZE:
  2831. case SKL_TKN_MM_U32_CPC:
  2832. case SKL_TKN_U32_MEM_PAGES:
  2833. case SKL_TKN_U32_OBS:
  2834. case SKL_TKN_U32_IBS:
  2835. case SKL_TKN_MM_U32_RES_PIN_ID:
  2836. case SKL_TKN_MM_U32_PIN_BUF:
  2837. ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
  2838. if (ret < 0)
  2839. return ret;
  2840. break;
  2841. case SKL_TKN_MM_U32_NUM_IN_FMT:
  2842. if (!fmt)
  2843. return -EINVAL;
  2844. res->nr_input_pins = tkn_elem->value;
  2845. break;
  2846. case SKL_TKN_MM_U32_NUM_OUT_FMT:
  2847. if (!fmt)
  2848. return -EINVAL;
  2849. res->nr_output_pins = tkn_elem->value;
  2850. break;
  2851. case SKL_TKN_U32_FMT_CH:
  2852. case SKL_TKN_U32_FMT_FREQ:
  2853. case SKL_TKN_U32_FMT_BIT_DEPTH:
  2854. case SKL_TKN_U32_FMT_SAMPLE_SIZE:
  2855. case SKL_TKN_U32_FMT_CH_CONFIG:
  2856. case SKL_TKN_U32_FMT_INTERLEAVE:
  2857. case SKL_TKN_U32_FMT_SAMPLE_TYPE:
  2858. case SKL_TKN_U32_FMT_CH_MAP:
  2859. case SKL_TKN_MM_U32_INTF_PIN_ID:
  2860. ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
  2861. dir, pin_idx);
  2862. if (ret < 0)
  2863. return ret;
  2864. break;
  2865. default:
  2866. dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
  2867. return -EINVAL;
  2868. }
  2869. tkn_count++;
  2870. return tkn_count;
  2871. }
  2872. static int skl_tplg_get_manifest_uuid(struct device *dev,
  2873. struct skl *skl,
  2874. struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
  2875. {
  2876. static int ref_count;
  2877. struct skl_module *mod;
  2878. if (uuid_tkn->token == SKL_TKN_UUID) {
  2879. mod = skl->modules[ref_count];
  2880. memcpy(&mod->uuid, &uuid_tkn->uuid, sizeof(uuid_tkn->uuid));
  2881. ref_count++;
  2882. } else {
  2883. dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
  2884. return -EINVAL;
  2885. }
  2886. return 0;
  2887. }
  2888. /*
  2889. * Fill the manifest structure by parsing the tokens based on the
  2890. * type.
  2891. */
/*
 * Parse one tuple block of the manifest private data: a sequence of
 * vendor arrays of string, UUID, or integer elements. Returns the total
 * number of bytes consumed (so the caller can advance its offset), or a
 * negative errno from a sub-parser.
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
		char *pvt_data, struct skl *skl,
		int block_size)
{
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	if (block_size <= 0)
		return -EINVAL;

	/* consume vendor arrays until the whole block is accounted for */
	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		off += array->size;

		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = skl_tplg_get_str_tkn(dev, array, skl);

			if (ret < 0)
				return ret;
			tkn_count = ret;

			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_string_elem);
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			ret = skl_tplg_get_manifest_uuid(dev, skl, array->uuid);
			if (ret < 0)
				return ret;

			tuple_size += sizeof(*array->uuid);
			continue;

		default:
			/* integer tokens: handled element by element below */
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		while (tkn_count <= array->num_elems - 1) {
			ret = skl_tplg_get_int_tkn(dev,
					tkn_elem, skl);
			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
		}
		tuple_size += (tkn_count * sizeof(*tkn_elem));
		tkn_count = 0;
	}

	return off;
}
  2938. /*
  2939. * Parse manifest private data for tokens. The private data block is
  2940. * preceded by descriptors for type and size of data block.
  2941. */
/*
 * Walk the manifest private data: a NUM_DATA_BLOCKS descriptor followed
 * by (type, size) descriptor pairs and tuple blocks. Unlike widget
 * private data, only tuple blocks are valid here; any binary block is
 * rejected with -EINVAL. Returns 0 on success or a negative errno.
 */
static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
				struct device *dev, struct skl *skl)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	/* off tracks the running byte offset into priv.data */
	off += array->size;

	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(manifest->priv.data + off);
		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);

		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		data = (manifest->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
					block_size);

			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			/* binary blocks are not valid in a manifest */
			return -EINVAL;
		}

		/* ret is the byte count consumed by the tuple parser */
		off += ret;
	}

	return 0;
}
  2988. static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
  2989. struct snd_soc_tplg_manifest *manifest)
  2990. {
  2991. struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
  2992. struct skl *skl = bus_to_skl(bus);
  2993. /* proceed only if we have private data defined */
  2994. if (manifest->priv.size == 0)
  2995. return 0;
  2996. skl_tplg_get_manifest_data(manifest, bus->dev, skl);
  2997. if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
  2998. dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
  2999. skl->skl_sst->lib_count);
  3000. return -EINVAL;
  3001. }
  3002. return 0;
  3003. }
/* Callback table registered with the ASoC topology core */
static struct snd_soc_tplg_ops skl_tplg_ops = {
	.widget_load = skl_tplg_widget_load,
	.control_load = skl_tplg_control_load,
	.bytes_ext_ops = skl_tlv_ops,
	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
	.io_ops = skl_tplg_kcontrol_ops,
	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
	.manifest = skl_manifest_load,
	.dai_load = skl_dai_load,
};
  3014. /*
  3015. * A pipe can have multiple modules, each of them will be a DAPM widget as
  3016. * well. While managing a pipeline we need to get the list of all the
  3017. * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list()
  3018. * helps to get the SKL type widgets in that pipeline
  3019. */
  3020. static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
  3021. {
  3022. struct snd_soc_dapm_widget *w;
  3023. struct skl_module_cfg *mcfg = NULL;
  3024. struct skl_pipe_module *p_module = NULL;
  3025. struct skl_pipe *pipe;
  3026. list_for_each_entry(w, &component->card->widgets, list) {
  3027. if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
  3028. mcfg = w->priv;
  3029. pipe = mcfg->pipe;
  3030. p_module = devm_kzalloc(component->dev,
  3031. sizeof(*p_module), GFP_KERNEL);
  3032. if (!p_module)
  3033. return -ENOMEM;
  3034. p_module->w = w;
  3035. list_add_tail(&p_module->node, &pipe->w_list);
  3036. }
  3037. }
  3038. return 0;
  3039. }
  3040. static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
  3041. {
  3042. struct skl_pipe_module *w_module;
  3043. struct snd_soc_dapm_widget *w;
  3044. struct skl_module_cfg *mconfig;
  3045. bool host_found = false, link_found = false;
  3046. list_for_each_entry(w_module, &pipe->w_list, node) {
  3047. w = w_module->w;
  3048. mconfig = w->priv;
  3049. if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
  3050. host_found = true;
  3051. else if (mconfig->dev_type != SKL_DEVICE_NONE)
  3052. link_found = true;
  3053. }
  3054. if (host_found && link_found)
  3055. pipe->passthru = true;
  3056. else
  3057. pipe->passthru = false;
  3058. }
/* This will be read from topology manifest, currently defined here */
#define SKL_MAX_MCPS 30000000
#define SKL_FW_MAX_MEM 1000000

/*
 * SKL topology init routine
 */
int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
{
	int ret;
	const struct firmware *fw;
	struct skl *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl;

	/* try the board-specific topology first, then the generic fallback */
	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
	if (ret < 0) {
		dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin",
				skl->tplg_name, ret);
		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
		if (ret < 0) {
			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
					"dfw_sst.bin", ret);
			return ret;
		}
	}

	/*
	 * The complete tplg for SKL is loaded as index 0, we don't use
	 * any other index
	 */
	ret = snd_soc_tplg_component_load(component,
					&skl_tplg_ops, fw, 0);
	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed%d\n", ret);
		release_firmware(fw);
		return -EINVAL;
	}

	skl->resource.max_mcps = SKL_MAX_MCPS;
	skl->resource.max_mem = SKL_FW_MAX_MEM;

	/* fw is kept for the component's lifetime; released at cleanup */
	skl->tplg = fw;
	ret = skl_tplg_create_pipe_widget_list(component);
	if (ret < 0)
		return ret;

	list_for_each_entry(ppl, &skl->ppl_list, node)
		skl_tplg_set_pipe_type(skl, ppl->pipe);

	return 0;
}