/*
 *  skl-topology.c - Implements Platform component ALSA controls/widget
 *  handlers.
 *
 *  Copyright (C) 2014-2015 Intel Corp
 *  Author: Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as version 2, as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "skl-tplg-interface.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK		(1 << 0)
#define SKL_RATE_FIXUP_MASK		(1 << 1)
#define SKL_FMT_FIXUP_MASK		(1 << 2)
#define SKL_IN_DIR_BIT_MASK		BIT(0)
#define SKL_PIN_COUNT_MASK		GENMASK(7, 4)

void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * SKL DSP driver modelling uses only a few DAPM widgets; the rest are
 * ignored. This helper checks if the SKL driver handles this widget type.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
{
	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
		return false;
	default:
		return true;
	}
}

/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * available in the pool.
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
				"%s: module_id %d instance %d\n", __func__,
				mconfig->id.module_id,
				mconfig->id.instance_id);
		dev_err(ctx->dev,
				"exceeds ppl memory available %d mem %d\n",
				skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}

/*
 * Add the mem to the mem pool. This is freed when the pipe is deleted.
 * Note: the DSP does the actual memory management; we only keep track of
 * the overall pool usage.
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}

/*
 * A pipeline needs DSP CPU resources for computation, quantified in MCPS
 * (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have enough MCPS
 * for this pipe.
 */
static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mcps %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}

static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mcps += mconfig->mcps;
}

/*
 * Free the MCPS when tearing down.
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mcps -= mconfig->mcps;
}

/*
 * Free the memory when tearing down.
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}
static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			mcfg->in_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			mcfg->out_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
}
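
/*
 * Build the default channel map: each 4-bit nibble i of ch_map holds the
 * slot assigned to channel i, and unused nibbles stay 0xF. For example,
 * 4 channels starting at slot 0 give 0xFFFF3210.
 */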
static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}
}

/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup to apply by supplying
 * the fixup mask, so based on that we calculate the output format.
 *
 * For an FE, the pcm hw_params is the source/target format; the same is
 * applicable for a BE when its hw_params is invoked.
 * Here, based on the FE/BE pipeline and the direction, we calculate the
 * input and output fixups and then apply them to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->in_fmt[0];
	out_fmt = &m_cfg->out_fmt[0];

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which depend on the pcm
 * params, so once we have calculated the params we need to calculate the
 * buffer sizes as well.
 */
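/*
 * Example, derived from the formula below: 48 kHz stereo in a 32-bit
 * container gives ibs = 48 * 2 * 4 = 384 bytes, i.e. roughly 1 ms of
 * audio; an SRC module scales this by a factor of 5.
 */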
static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	int in_rate, out_rate;

	/*
	 * Since the fixup is applied to pin 0 only, ibs and obs need to
	 * change for pin 0 only.
	 */
	in_fmt = &mcfg->in_fmt[0];
	out_fmt = &mcfg->out_fmt[0];

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	if (in_fmt->s_freq % 1000)
		in_rate = (in_fmt->s_freq / 1000) + 1;
	else
		in_rate = (in_fmt->s_freq / 1000);

	mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
			(mcfg->in_fmt->bit_depth >> 3) *
			multiplier;

	if (mcfg->out_fmt->s_freq % 1000)
		out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
	else
		out_rate = (mcfg->out_fmt->s_freq / 1000);

	mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
			(mcfg->out_fmt->bit_depth >> 3) *
			multiplier;
}
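
/*
 * Fill in a default NHLT capability blob for a BE module when the
 * topology did not provide one, based on the endpoint's device type,
 * direction and default format.
 */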
static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(ctx->dev);

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(ctx->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_cfg->in_fmt[0].s_freq;
		s_fmt = m_cfg->in_fmt[0].bit_depth;
		ch = m_cfg->in_fmt[0].channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_cfg->out_fmt[0].s_freq;
			s_fmt = m_cfg->out_fmt[0].bit_depth;
			ch = m_cfg->out_fmt[0].channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_cfg->in_fmt[0].s_freq;
			s_fmt = m_cfg->in_fmt[0].bit_depth;
			ch = m_cfg->in_fmt[0].channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls which need
 * to be applied after the module is initialized. If the set_params flag of
 * a param is SKL_PARAM_SET, that param is sent after the module has been
 * initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls but are required at
 * module initialization. Such a param is identified by the SKL_PARAM_INIT
 * value of its set_params flag and is sent as part of module init.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)&bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->skl_sst->dev,
					"module %pUL id not populated\n",
					(uuid_le *)mconfig->guid);
			return -EIO;
		}

		/* check resource available */
		if (!skl_is_pipe_mcps_avail(skl, mconfig))
			return -ENOMEM;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;

			mconfig->m_state = SKL_MODULE_LOADED;
		}

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, ctx);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, ctx);
		mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);
		ret = skl_init_module(ctx, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(ctx, mconfig);
			return ret;
		}
		skl_tplg_alloc_pipe_mcps(skl, mconfig);
		ret = skl_tplg_set_module_params(w, ctx);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
				struct skl_pipe *pipe)
{
	int ret;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		mconfig = w_module->w->priv;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(ctx, mconfig);
	}

	/* no modules to unload in this path, so return */
	return 0;
}

/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 * - check the resources
 * - create the pipeline
 * - initialize the modules in the pipeline
 * - finally, bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	return 0;
}
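
/*
 * Rewrite the module instance ids in a KPB param blob from the
 * topology-assigned values to the driver's private (pvt) ids before the
 * params are sent to the DSP.
 */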
static int skl_fill_sink_instance_id(struct skl_sst *ctx,
				struct skl_algo_data *alg_data)
{
	struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
	struct skl_mod_inst_map *inst;
	int i, pvt_id;

	inst = params->map;

	for (i = 0; i < params->num_modules; i++) {
		pvt_id = skl_get_pvt_instance_id_map(ctx,
					inst->mod_id, inst->inst_id);
		if (pvt_id < 0)
			return -EINVAL;
		inst->inst_id = pvt_id;
		inst++;
	}
	return 0;
}

/*
 * Some modules require params to be set after the module is bound to
 * all of its connected pins.
 *
 * The module provider initializes the set_params flag for such modules
 * and we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	/*
	 * check all out/in pins are in bind state.
	 * if so set the module param
	 */
	for (i = 0; i < mcfg->max_out_queue; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->max_in_queue; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				if (mconfig->m_type == SKL_MODULE_TYPE_KPB)
					skl_fill_sink_instance_id(ctx, bc);
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->max,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check the widgets in the sink pipelines; they can
		 * be of any widget type and we are only interested in the
		 * ones used by SKL, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
					is_skl_dsp_widget_type(p->sink)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(ctx,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of a
 * PGA we need to do the following:
 * - Bind to the sink pipeline
 *   Since the sink pipes can be running and we don't get a mixer event on
 *   connect for an already running mixer, we need to find the sink pipes
 *   here and bind to them. This way dynamic connect works.
 * - Start the sink pipeline, if not running
 * - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(ctx, src_mconfig->pipe);

	return 0;
}
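
/*
 * Walk the source paths of a widget and return the first connected SKL
 * DSP widget found, recursing through non-DSP widgets along the way.
 */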
static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;
	struct skl_sst *ctx = skl->skl_sst;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check the widgets in the source pipelines; they
		 * can be of any widget type and we are only interested in
		 * the ones used by SKL, so check that first.
		 */
		if ((p->source->priv != NULL) &&
					is_skl_dsp_widget_type(p->source)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * In the Post-PMU event of the mixer we need to do the following:
 * - Check if this pipe is running
 * - If not, then
 *   - bind this pipeline to its source pipeline
 *     (if the source pipe is already running, this is a dynamic
 *     connection and we need to bind only to that pipe)
 *   - start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, that means the source is
	 * driving one more sink before this sink got connected. Since the
	 * source is started, bind this sink to the source and start this
	 * pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * check pipe state, then no need to bind or start the
		 * pipe
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 * - Stop the pipe
 * - find the source connections and remove them from the dapm_path_list
 * - unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->max_in_queue; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;
			/*
			 * If this pin is still bound, the PMD for the
			 * source pipe has not occurred yet; the source is
			 * connected to some other sink, so it is the sink's
			 * responsibility to unbind itself from the source.
			 */
			ret = skl_stop_pipe(ctx, src_mconfig->pipe);
			if (ret < 0)
				return ret;

			ret = skl_unbind_modules(ctx,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * In the Post-PMD event of the mixer we need to do the following:
 * - Free the MCPS used
 * - Free the memory used
 * - Unbind the modules within the pipeline
 * - Delete the pipeline (modules do not need to be explicitly deleted;
 *   deleting the pipeline is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;
	int ret = 0;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	ret = skl_delete_pipe(ctx, mconfig->pipe);

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}

/*
 * In the Post-PMD event of the PGA we need to do the following:
 * - Free the MCPS used
 * - Stop the pipeline
 * - If a source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->max_out_queue; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector; if the pin is still bound,
			 * the unbind between source and sink has not
			 * happened yet.
			 */
			ret = skl_unbind_modules(ctx, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
 * a mixer is not required, it is treated as a static mixer, aka vmixer,
 * with a hard path to the source module.
 * So we don't need to check whether the source is started or not, as the
 * hard path creates the dependency between them.
 */
static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs,
 * but we are only interested in the last PGA (leaf PGA) in a pipeline, in
 * order to disconnect from the sink while it is running (two FE to one BE
 * or one FE to two BE scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}
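
/*
 * TLV byte control get: the buffer returned to user space is laid out as
 * { param_id, size, payload[] }, matching the three copy_to_user() calls
 * below. If the widget is powered, the params are refreshed from the DSP
 * first.
 */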
static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param_id is of type Vendor, firmware expects the
		 * actual parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

/*
 * Fill the dma id for host and link. In case of a passthrough pipeline,
 * both the host and the link are in the same pipeline, so the host or
 * link id is copied based on the dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed by the hw_params of the DAI.
 * On hw_params, the params are stored in the Gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_fmt *format = NULL;

	skl_tplg_fill_dma_id(mconfig, params);

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->in_fmt[0];
	else
		format = &mconfig->out_fmt[0];

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		mconfig->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		mconfig->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}

/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply them to
 * the FE pipeline.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}
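
/* Map the topology device type to the corresponding NHLT link type */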
static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters.
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for the interface type (i2s/pdm) and
 * instance. The port can have multiple settings, so pick based on the PCM
 * parameters.
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id */
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream);
	if (cfg) {
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on sink or source, we need to either find the source
 * list or the sink list and set the pipeline parameters.
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;
}
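
/*
 * Dispatch tables mapping the topology widget event types and byte-control
 * types to the handlers above; the ASoC topology core invokes these when
 * the firmware topology is loaded (registration happens later in this
 * file, outside this excerpt).
 */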
static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};
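
/*
 * Fill a pipe attribute (connection type, priority, memory pages or low
 * power mode) from a single topology token.
 */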
  1308. static int skl_tplg_fill_pipe_tkn(struct device *dev,
  1309. struct skl_pipe *pipe, u32 tkn,
  1310. u32 tkn_val)
  1311. {
  1312. switch (tkn) {
  1313. case SKL_TKN_U32_PIPE_CONN_TYPE:
  1314. pipe->conn_type = tkn_val;
  1315. break;
  1316. case SKL_TKN_U32_PIPE_PRIORITY:
  1317. pipe->pipe_priority = tkn_val;
  1318. break;
  1319. case SKL_TKN_U32_PIPE_MEM_PGS:
  1320. pipe->memory_pages = tkn_val;
  1321. break;
  1322. case SKL_TKN_U32_PMODE:
  1323. pipe->lp_mode = tkn_val;
  1324. break;
  1325. default:
  1326. dev_err(dev, "Token not handled %d\n", tkn);
  1327. return -EINVAL;
  1328. }
  1329. return 0;
  1330. }
/*
 * Add a pipeline by parsing the relevant tokens.
 * Return EEXIST if the pipe already exists in the pipeline list; in that
 * case the existing pipe is reused for this module.
 */
static int skl_tplg_add_pipe(struct device *dev,
                struct skl_module_cfg *mconfig, struct skl *skl,
                struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
        struct skl_pipeline *ppl;
        struct skl_pipe *pipe;
        struct skl_pipe_params *params;

        list_for_each_entry(ppl, &skl->ppl_list, node) {
                if (ppl->pipe->ppl_id == tkn_elem->value) {
                        mconfig->pipe = ppl->pipe;
                        return EEXIST;
                }
        }

        ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
        if (!ppl)
                return -ENOMEM;

        pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
        if (!pipe)
                return -ENOMEM;

        params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
        if (!params)
                return -ENOMEM;

        pipe->p_params = params;
        pipe->ppl_id = tkn_elem->value;
        INIT_LIST_HEAD(&pipe->w_list);

        ppl->pipe = pipe;
        list_add(&ppl->node, &skl->ppl_list);

        mconfig->pipe = pipe;
        mconfig->pipe->state = SKL_PIPE_INVALID;

        return 0;
}

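/*
 * Fill the module id or instance id of a single pin from a pin token.
 */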
static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
                        struct skl_module_pin *m_pin,
                        int pin_index, u32 value)
{
        switch (tkn) {
        case SKL_TKN_U32_PIN_MOD_ID:
                m_pin[pin_index].id.module_id = value;
                break;

        case SKL_TKN_U32_PIN_INST_ID:
                m_pin[pin_index].id.instance_id = value;
                break;

        default:
                dev_err(dev, "%d Not a pin token\n", value);
                return -EINVAL;
        }

        return 0;
}

/*
 * Parse for pin config specific tokens to fill up the
 * module private data
 */
static int skl_tplg_fill_pins_info(struct device *dev,
                struct skl_module_cfg *mconfig,
                struct snd_soc_tplg_vendor_value_elem *tkn_elem,
                int dir, int pin_count)
{
        int ret;
        struct skl_module_pin *m_pin;

        switch (dir) {
        case SKL_DIR_IN:
                m_pin = mconfig->m_in_pin;
                break;

        case SKL_DIR_OUT:
                m_pin = mconfig->m_out_pin;
                break;

        default:
                dev_err(dev, "Invalid direction value\n");
                return -EINVAL;
        }

        ret = skl_tplg_fill_pin(dev, tkn_elem->token,
                        m_pin, pin_count, tkn_elem->value);
        if (ret < 0)
                return ret;

        m_pin[pin_count].in_use = false;
        m_pin[pin_count].pin_state = SKL_PIN_UNBIND;

        return 0;
}

/*
 * Fill up input/output module config format based
 * on the direction
 */
static int skl_tplg_fill_fmt(struct device *dev,
                struct skl_module_cfg *mconfig, u32 tkn,
                u32 value, u32 dir, u32 pin_count)
{
        struct skl_module_fmt *dst_fmt;

        switch (dir) {
        case SKL_DIR_IN:
                dst_fmt = mconfig->in_fmt;
                dst_fmt += pin_count;
                break;

        case SKL_DIR_OUT:
                dst_fmt = mconfig->out_fmt;
                dst_fmt += pin_count;
                break;

        default:
                dev_err(dev, "Invalid direction value\n");
                return -EINVAL;
        }

        switch (tkn) {
        case SKL_TKN_U32_FMT_CH:
                dst_fmt->channels = value;
                break;

        case SKL_TKN_U32_FMT_FREQ:
                dst_fmt->s_freq = value;
                break;

        case SKL_TKN_U32_FMT_BIT_DEPTH:
                dst_fmt->bit_depth = value;
                break;

        case SKL_TKN_U32_FMT_SAMPLE_SIZE:
                dst_fmt->valid_bit_depth = value;
                break;

        case SKL_TKN_U32_FMT_CH_CONFIG:
                dst_fmt->ch_cfg = value;
                break;

        case SKL_TKN_U32_FMT_INTERLEAVE:
                dst_fmt->interleaving_style = value;
                break;

        case SKL_TKN_U32_FMT_SAMPLE_TYPE:
                dst_fmt->sample_type = value;
                break;

        case SKL_TKN_U32_FMT_CH_MAP:
                dst_fmt->ch_map = value;
                break;

        default:
                dev_err(dev, "Invalid token %d\n", tkn);
                return -EINVAL;
        }

        return 0;
}

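/*
 * Copy the module UUID from a UUID token into the module config.
 */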
static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
              struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
{
        if (uuid_tkn->token == SKL_TKN_UUID)
                memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
        else {
                dev_err(dev, "Not a UUID token tkn %d\n", uuid_tkn->token);
                return -EINVAL;
        }

        return 0;
}

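/*
 * Set the is_dynamic flag on every pin of the given pin array.
 */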
static void skl_tplg_fill_pin_dynamic_val(
                struct skl_module_pin *mpin, u32 pin_count, u32 value)
{
        int i;

        for (i = 0; i < pin_count; i++)
                mpin[i].is_dynamic = value;
}

/*
 * Parse tokens to fill up the module private data
 */
static int skl_tplg_get_token(struct device *dev,
                struct snd_soc_tplg_vendor_value_elem *tkn_elem,
                struct skl *skl, struct skl_module_cfg *mconfig)
{
        int tkn_count = 0;
        int ret;
        static int is_pipe_exists;
        static int pin_index, dir;

        if (tkn_elem->token > SKL_TKN_MAX)
                return -EINVAL;

        switch (tkn_elem->token) {
        case SKL_TKN_U8_IN_QUEUE_COUNT:
                mconfig->max_in_queue = tkn_elem->value;
                mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
                                        sizeof(*mconfig->m_in_pin),
                                        GFP_KERNEL);
                if (!mconfig->m_in_pin)
                        return -ENOMEM;

                break;

        case SKL_TKN_U8_OUT_QUEUE_COUNT:
                mconfig->max_out_queue = tkn_elem->value;
                mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
                                        sizeof(*mconfig->m_out_pin),
                                        GFP_KERNEL);
                if (!mconfig->m_out_pin)
                        return -ENOMEM;

                break;

        case SKL_TKN_U8_DYN_IN_PIN:
                if (!mconfig->m_in_pin)
                        return -ENOMEM;

                skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
                        mconfig->max_in_queue, tkn_elem->value);

                break;

        case SKL_TKN_U8_DYN_OUT_PIN:
                if (!mconfig->m_out_pin)
                        return -ENOMEM;

                skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
                        mconfig->max_out_queue, tkn_elem->value);

                break;

        case SKL_TKN_U8_TIME_SLOT:
                mconfig->time_slot = tkn_elem->value;
                break;

        case SKL_TKN_U8_CORE_ID:
                mconfig->core_id = tkn_elem->value;
                break;

        case SKL_TKN_U8_MOD_TYPE:
                mconfig->m_type = tkn_elem->value;
                break;

        case SKL_TKN_U8_DEV_TYPE:
                mconfig->dev_type = tkn_elem->value;
                break;

        case SKL_TKN_U8_HW_CONN_TYPE:
                mconfig->hw_conn_type = tkn_elem->value;
                break;

        case SKL_TKN_U16_MOD_INST_ID:
                mconfig->id.instance_id = tkn_elem->value;
                break;

        case SKL_TKN_U32_MEM_PAGES:
                mconfig->mem_pages = tkn_elem->value;
                break;

        case SKL_TKN_U32_MAX_MCPS:
                mconfig->mcps = tkn_elem->value;
                break;

        case SKL_TKN_U32_OBS:
                mconfig->obs = tkn_elem->value;
                break;

        case SKL_TKN_U32_IBS:
                mconfig->ibs = tkn_elem->value;
                break;

        case SKL_TKN_U32_VBUS_ID:
                mconfig->vbus_id = tkn_elem->value;
                break;

        case SKL_TKN_U32_PARAMS_FIXUP:
                mconfig->params_fixup = tkn_elem->value;
                break;

        case SKL_TKN_U32_CONVERTER:
                mconfig->converter = tkn_elem->value;
                break;

        case SKL_TKL_U32_D0I3_CAPS:
                mconfig->d0i3_caps = tkn_elem->value;
                break;

        case SKL_TKN_U32_PIPE_ID:
                ret = skl_tplg_add_pipe(dev,
                                mconfig, skl, tkn_elem);

                if (ret < 0)
                        return is_pipe_exists;

                if (ret == EEXIST)
                        is_pipe_exists = 1;

                break;

        case SKL_TKN_U32_PIPE_CONN_TYPE:
        case SKL_TKN_U32_PIPE_PRIORITY:
        case SKL_TKN_U32_PIPE_MEM_PGS:
        case SKL_TKN_U32_PMODE:
                if (is_pipe_exists) {
                        ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
                                        tkn_elem->token, tkn_elem->value);
                        if (ret < 0)
                                return ret;
                }

                break;

        /*
         * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
         * direction and the pin count. The first four bits represent
         * direction and next four the pin count.
         */
        case SKL_TKN_U32_DIR_PIN_COUNT:
                dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
                pin_index = (tkn_elem->value &
                        SKL_PIN_COUNT_MASK) >> 4;

                break;

        case SKL_TKN_U32_FMT_CH:
        case SKL_TKN_U32_FMT_FREQ:
        case SKL_TKN_U32_FMT_BIT_DEPTH:
        case SKL_TKN_U32_FMT_SAMPLE_SIZE:
        case SKL_TKN_U32_FMT_CH_CONFIG:
        case SKL_TKN_U32_FMT_INTERLEAVE:
        case SKL_TKN_U32_FMT_SAMPLE_TYPE:
        case SKL_TKN_U32_FMT_CH_MAP:
                ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
                                tkn_elem->value, dir, pin_index);

                if (ret < 0)
                        return ret;

                break;

        case SKL_TKN_U32_PIN_MOD_ID:
        case SKL_TKN_U32_PIN_INST_ID:
                ret = skl_tplg_fill_pins_info(dev,
                                mconfig, tkn_elem, dir,
                                pin_index);
                if (ret < 0)
                        return ret;

                break;

        case SKL_TKN_U32_CAPS_SIZE:
                mconfig->formats_config.caps_size =
                        tkn_elem->value;

                break;

        case SKL_TKN_U32_PROC_DOMAIN:
                mconfig->domain =
                        tkn_elem->value;

                break;

        case SKL_TKN_U8_IN_PIN_TYPE:
        case SKL_TKN_U8_OUT_PIN_TYPE:
        case SKL_TKN_U8_CONN_TYPE:
                break;

        default:
                dev_err(dev, "Token %d not handled\n",
                                tkn_elem->token);
                return -EINVAL;
        }

        tkn_count++;

        return tkn_count;
}

/*
 * Parse the vendor array for specific tokens to construct
 * module private data
 */
static int skl_tplg_get_tokens(struct device *dev,
                char *pvt_data, struct skl *skl,
                struct skl_module_cfg *mconfig, int block_size)
{
        struct snd_soc_tplg_vendor_array *array;
        struct snd_soc_tplg_vendor_value_elem *tkn_elem;
        int tkn_count = 0, ret;
        int off = 0, tuple_size = 0;

        if (block_size <= 0)
                return -EINVAL;

        while (tuple_size < block_size) {
                array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);

                off += array->size;

                switch (array->type) {
                case SND_SOC_TPLG_TUPLE_TYPE_STRING:
                        dev_warn(dev, "no string tokens expected for skl tplg\n");
                        continue;

                case SND_SOC_TPLG_TUPLE_TYPE_UUID:
                        ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
                        if (ret < 0)
                                return ret;

                        tuple_size += sizeof(*array->uuid);
                        continue;

                default:
                        tkn_elem = array->value;
                        tkn_count = 0;
                        break;
                }

                while (tkn_count <= (array->num_elems - 1)) {
                        ret = skl_tplg_get_token(dev, tkn_elem,
                                        skl, mconfig);

                        if (ret < 0)
                                return ret;

                        tkn_count = tkn_count + ret;
                        tkn_elem++;
                }

                tuple_size += tkn_count * sizeof(*tkn_elem);
        }

        return 0;
}

/*
 * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size
 */
static int skl_tplg_get_desc_blocks(struct device *dev,
                struct snd_soc_tplg_vendor_array *array)
{
        struct snd_soc_tplg_vendor_value_elem *tkn_elem;

        tkn_elem = array->value;

        switch (tkn_elem->token) {
        case SKL_TKN_U8_NUM_BLOCKS:
        case SKL_TKN_U8_BLOCK_TYPE:
        case SKL_TKN_U16_BLOCK_SIZE:
                return tkn_elem->value;

        default:
                dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
                break;
        }

        return -EINVAL;
}

/*
 * Parse the private data for the token and corresponding value.
 * The private data can have multiple data blocks. So, a data block
 * is preceded by a descriptor for the number of blocks and a descriptor
 * for the type and size of the succeeding data block.
 */
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
                                struct skl *skl, struct device *dev,
                                struct skl_module_cfg *mconfig)
{
        struct snd_soc_tplg_vendor_array *array;
        int num_blocks, block_size = 0, block_type, off = 0;
        char *data;
        int ret;

        /* Read the NUM_DATA_BLOCKS descriptor */
        array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
        ret = skl_tplg_get_desc_blocks(dev, array);
        if (ret < 0)
                return ret;
        num_blocks = ret;

        off += array->size;
        array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);

        /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
        while (num_blocks > 0) {
                ret = skl_tplg_get_desc_blocks(dev, array);

                if (ret < 0)
                        return ret;
                block_type = ret;
                off += array->size;

                array = (struct snd_soc_tplg_vendor_array *)
                        (tplg_w->priv.data + off);

                ret = skl_tplg_get_desc_blocks(dev, array);

                if (ret < 0)
                        return ret;
                block_size = ret;
                off += array->size;

                array = (struct snd_soc_tplg_vendor_array *)
                        (tplg_w->priv.data + off);

                data = (tplg_w->priv.data + off);

                if (block_type == SKL_TYPE_TUPLE) {
                        ret = skl_tplg_get_tokens(dev, data,
                                        skl, mconfig, block_size);

                        if (ret < 0)
                                return ret;

                        --num_blocks;
                } else {
                        if (mconfig->formats_config.caps_size > 0)
                                memcpy(mconfig->formats_config.caps, data,
                                        mconfig->formats_config.caps_size);
                        --num_blocks;
                }
        }

        return 0;
}

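/*
 * Reset the pin usage and the pipe/module state of a widget that belongs
 * to this platform.
 */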
static void skl_clear_pin_config(struct snd_soc_platform *platform,
                                struct snd_soc_dapm_widget *w)
{
        int i;
        struct skl_module_cfg *mconfig;
        struct skl_pipe *pipe;

        if (!strncmp(w->dapm->component->name, platform->component.name,
                                        strlen(platform->component.name))) {
                mconfig = w->priv;
                pipe = mconfig->pipe;
                for (i = 0; i < mconfig->max_in_queue; i++) {
                        mconfig->m_in_pin[i].in_use = false;
                        mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
                }
                for (i = 0; i < mconfig->max_out_queue; i++) {
                        mconfig->m_out_pin[i].in_use = false;
                        mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
                }
                pipe->state = SKL_PIPE_INVALID;
                mconfig->m_state = SKL_MODULE_UNINIT;
        }
}

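/*
 * Reset the memory/MCPS resource counters, clear the pin configuration of
 * all SKL DSP widgets on the card and reset the DSP module use counts.
 */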
void skl_cleanup_resources(struct skl *skl)
{
        struct skl_sst *ctx = skl->skl_sst;
        struct snd_soc_platform *soc_platform = skl->platform;
        struct snd_soc_dapm_widget *w;
        struct snd_soc_card *card;

        if (soc_platform == NULL)
                return;

        card = soc_platform->component.card;
        if (!card || !card->instantiated)
                return;

        skl->resource.mem = 0;
        skl->resource.mcps = 0;

        list_for_each_entry(w, &card->widgets, list) {
                if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
                        skl_clear_pin_config(soc_platform, w);
        }

        skl_clear_module_cnt(ctx->dsp);
}

/*
 * Topology core widget load callback
 *
 * This is used to save the private data for each widget, which gives the
 * driver the module and pipeline parameters that the DSP FW expects, such
 * as ids, resource values and formats.
 */
static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
                                struct snd_soc_dapm_widget *w,
                                struct snd_soc_tplg_dapm_widget *tplg_w)
{
        int ret;
        struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
        struct skl *skl = ebus_to_skl(ebus);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct skl_module_cfg *mconfig;

        if (!tplg_w->priv.size)
                goto bind_event;

        mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);

        if (!mconfig)
                return -ENOMEM;

        w->priv = mconfig;

        /*
         * The module binary can be loaded later, so set the id to query it
         * when the module is loaded for a use case
         */
        mconfig->id.module_id = -1;

        /* Parse private data for tuples */
        ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
        if (ret < 0)
                return ret;

bind_event:
        if (tplg_w->event_type == 0) {
                dev_dbg(bus->dev, "ASoC: No event handler required\n");
                return 0;
        }

        ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
                                        ARRAY_SIZE(skl_tplg_widget_ops),
                                        tplg_w->event_type);

        if (ret) {
                dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
                                        __func__, tplg_w->event_type);
                return -EINVAL;
        }

        return 0;
}

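/*
 * Allocate the driver-side algo data for a bytes control and copy the
 * parameters from the topology (dfw) private data into it.
 */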
static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
                                struct snd_soc_tplg_bytes_control *bc)
{
        struct skl_algo_data *ac;
        struct skl_dfw_algo_data *dfw_ac =
                                (struct skl_dfw_algo_data *)bc->priv.data;

        ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
        if (!ac)
                return -ENOMEM;

        /* Fill private data */
        ac->max = dfw_ac->max;
        ac->param_id = dfw_ac->param_id;
        ac->set_params = dfw_ac->set_params;
        ac->size = dfw_ac->max;

        if (ac->max) {
                ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
                if (!ac->params)
                        return -ENOMEM;

                memcpy(ac->params, dfw_ac->params, ac->max);
        }

        be->dobj.private = ac;
        return 0;
}

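/*
 * Topology control load callback: attach private algo data to TLV bytes
 * controls; other control types are not handled here.
 */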
static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
                                struct snd_kcontrol_new *kctl,
                                struct snd_soc_tplg_ctl_hdr *hdr)
{
        struct soc_bytes_ext *sb;
        struct snd_soc_tplg_bytes_control *tplg_bc;
        struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
        struct hdac_bus *bus = ebus_to_hbus(ebus);

        switch (hdr->ops.info) {
        case SND_SOC_TPLG_CTL_BYTES:
                tplg_bc = container_of(hdr,
                                struct snd_soc_tplg_bytes_control, hdr);
                if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
                        sb = (struct soc_bytes_ext *)kctl->private_value;
                        if (tplg_bc->priv.size)
                                return skl_init_algo_data(
                                                bus->dev, sb, tplg_bc);
                }
                break;

        default:
                dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
                        hdr->ops.get, hdr->ops.put, hdr->ops.info);
                break;
        }

        return 0;
}

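/*
 * Parse a manifest string token (library name) into the firmware
 * manifest info.
 */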
static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
                struct snd_soc_tplg_vendor_string_elem *str_elem,
                struct skl_dfw_manifest *minfo)
{
        int tkn_count = 0;
        static int ref_count;

        switch (str_elem->token) {
        case SKL_TKN_STR_LIB_NAME:
                if (ref_count > minfo->lib_count - 1) {
                        ref_count = 0;
                        return -EINVAL;
                }

                strncpy(minfo->lib[ref_count].name, str_elem->string,
                                ARRAY_SIZE(minfo->lib[ref_count].name));
                ref_count++;
                tkn_count++;
                break;

        default:
                dev_err(dev, "Not a string token %d\n", str_elem->token);
                break;
        }

        return tkn_count;
}

static int skl_tplg_get_str_tkn(struct device *dev,
                struct snd_soc_tplg_vendor_array *array,
                struct skl_dfw_manifest *minfo)
{
        int tkn_count = 0, ret;
        struct snd_soc_tplg_vendor_string_elem *str_elem;

        str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
        while (tkn_count < array->num_elems) {
                ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, minfo);
                str_elem++;

                if (ret < 0)
                        return ret;

                tkn_count = tkn_count + ret;
        }

        return tkn_count;
}

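/*
 * Parse an integer manifest token; currently only the library count is
 * handled.
 */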
static int skl_tplg_get_int_tkn(struct device *dev,
                struct snd_soc_tplg_vendor_value_elem *tkn_elem,
                struct skl_dfw_manifest *minfo)
{
        int tkn_count = 0;

        switch (tkn_elem->token) {
        case SKL_TKN_U32_LIB_COUNT:
                minfo->lib_count = tkn_elem->value;
                tkn_count++;
                break;

        default:
                dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
                return -EINVAL;
        }

        return tkn_count;
}

/*
 * Fill the manifest structure by parsing the tokens based on the
 * type.
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
                char *pvt_data, struct skl_dfw_manifest *minfo,
                int block_size)
{
        int tkn_count = 0, ret;
        int off = 0, tuple_size = 0;
        struct snd_soc_tplg_vendor_array *array;
        struct snd_soc_tplg_vendor_value_elem *tkn_elem;

        if (block_size <= 0)
                return -EINVAL;

        while (tuple_size < block_size) {
                array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
                off += array->size;

                switch (array->type) {
                case SND_SOC_TPLG_TUPLE_TYPE_STRING:
                        ret = skl_tplg_get_str_tkn(dev, array, minfo);

                        if (ret < 0)
                                return ret;
                        tkn_count += ret;

                        tuple_size += tkn_count *
                                sizeof(struct snd_soc_tplg_vendor_string_elem);
                        continue;

                case SND_SOC_TPLG_TUPLE_TYPE_UUID:
                        dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
                        continue;

                default:
                        tkn_elem = array->value;
                        tkn_count = 0;
                        break;
                }

                while (tkn_count <= array->num_elems - 1) {
                        ret = skl_tplg_get_int_tkn(dev,
                                        tkn_elem, minfo);
                        if (ret < 0)
                                return ret;

                        tkn_count = tkn_count + ret;
                        tkn_elem++;
                        tuple_size += tkn_count *
                                sizeof(struct snd_soc_tplg_vendor_value_elem);
                        break;
                }
                tkn_count = 0;
        }

        return 0;
}

/*
 * Parse manifest private data for tokens. The private data block is
 * preceded by descriptors for type and size of data block.
 */
static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
                        struct device *dev, struct skl_dfw_manifest *minfo)
{
        struct snd_soc_tplg_vendor_array *array;
        int num_blocks, block_size = 0, block_type, off = 0;
        char *data;
        int ret;

        /* Read the NUM_DATA_BLOCKS descriptor */
        array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
        ret = skl_tplg_get_desc_blocks(dev, array);
        if (ret < 0)
                return ret;
        num_blocks = ret;

        off += array->size;
        array = (struct snd_soc_tplg_vendor_array *)
                        (manifest->priv.data + off);

        /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
        while (num_blocks > 0) {
                ret = skl_tplg_get_desc_blocks(dev, array);

                if (ret < 0)
                        return ret;
                block_type = ret;
                off += array->size;

                array = (struct snd_soc_tplg_vendor_array *)
                        (manifest->priv.data + off);

                ret = skl_tplg_get_desc_blocks(dev, array);

                if (ret < 0)
                        return ret;
                block_size = ret;
                off += array->size;

                array = (struct snd_soc_tplg_vendor_array *)
                        (manifest->priv.data + off);

                data = (manifest->priv.data + off);

                if (block_type == SKL_TYPE_TUPLE) {
                        ret = skl_tplg_get_manifest_tkn(dev, data, minfo,
                                        block_size);

                        if (ret < 0)
                                return ret;

                        --num_blocks;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}

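/*
 * Topology manifest load callback: parse the manifest private data and
 * validate the resulting library count.
 */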
static int skl_manifest_load(struct snd_soc_component *cmpnt,
                                struct snd_soc_tplg_manifest *manifest)
{
        struct skl_dfw_manifest *minfo;
        struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct skl *skl = ebus_to_skl(ebus);
        int ret = 0;

        /* proceed only if we have private data defined */
        if (manifest->priv.size == 0)
                return 0;

        minfo = &skl->skl_sst->manifest;

        skl_tplg_get_manifest_data(manifest, bus->dev, minfo);

        if (minfo->lib_count > HDA_MAX_LIB) {
                dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
                                        minfo->lib_count);
                ret = -EINVAL;
        }

        return ret;
}

static struct snd_soc_tplg_ops skl_tplg_ops = {
        .widget_load = skl_tplg_widget_load,
        .control_load = skl_tplg_control_load,
        .bytes_ext_ops = skl_tlv_ops,
        .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
        .manifest = skl_manifest_load,
};

/*
 * A pipe can have multiple modules, each of them will be a DAPM widget as
 * well. While managing a pipeline we need the list of all the widgets in
 * it, so this helper - skl_tplg_create_pipe_widget_list() - collects the
 * SKL type widgets belonging to each pipeline.
 */
static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
{
        struct snd_soc_dapm_widget *w;
        struct skl_module_cfg *mcfg = NULL;
        struct skl_pipe_module *p_module = NULL;
        struct skl_pipe *pipe;

        list_for_each_entry(w, &platform->component.card->widgets, list) {
                if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
                        mcfg = w->priv;
                        pipe = mcfg->pipe;

                        p_module = devm_kzalloc(platform->dev,
                                                sizeof(*p_module), GFP_KERNEL);
                        if (!p_module)
                                return -ENOMEM;

                        p_module->w = w;
                        list_add_tail(&p_module->node, &pipe->w_list);
                }
        }

        return 0;
}

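/*
 * A pipe is marked as pass-through when it contains both a host DMA
 * module and a link module.
 */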
static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
{
        struct skl_pipe_module *w_module;
        struct snd_soc_dapm_widget *w;
        struct skl_module_cfg *mconfig;
        bool host_found = false, link_found = false;

        list_for_each_entry(w_module, &pipe->w_list, node) {
                w = w_module->w;
                mconfig = w->priv;

                if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
                        host_found = true;
                else if (mconfig->dev_type != SKL_DEVICE_NONE)
                        link_found = true;
        }

        if (host_found && link_found)
                pipe->passthru = true;
        else
                pipe->passthru = false;
}

/* This will be read from topology manifest, currently defined here */
#define SKL_MAX_MCPS 30000000
#define SKL_FW_MAX_MEM 1000000

/*
 * SKL topology init routine
 */
int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
{
        int ret;
        const struct firmware *fw;
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct skl *skl = ebus_to_skl(ebus);
        struct skl_pipeline *ppl;

        ret = request_firmware(&fw, skl->tplg_name, bus->dev);
        if (ret < 0) {
                dev_err(bus->dev, "tplg fw %s load failed with %d\n",
                                skl->tplg_name, ret);
                ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
                if (ret < 0) {
                        dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
                                        "dfw_sst.bin", ret);
                        return ret;
                }
        }

        /*
         * The complete tplg for SKL is loaded as index 0, we don't use
         * any other index
         */
        ret = snd_soc_tplg_component_load(&platform->component,
                                        &skl_tplg_ops, fw, 0);
        if (ret < 0) {
                dev_err(bus->dev, "tplg component load failed %d\n", ret);
                release_firmware(fw);
                return -EINVAL;
        }

        skl->resource.max_mcps = SKL_MAX_MCPS;
        skl->resource.max_mem = SKL_FW_MAX_MEM;

        skl->tplg = fw;
        ret = skl_tplg_create_pipe_widget_list(platform);
        if (ret < 0)
                return ret;

        list_for_each_entry(ppl, &skl->ppl_list, node)
                skl_tplg_set_pipe_type(skl, ppl->pipe);

        return 0;
}