skl-topology.c

/*
 *  skl-topology.c - Implements Platform component ALSA controls/widget
 *  handlers.
 *
 *  Copyright (C) 2014-2015 Intel Corp
 *  Author: Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as version 2, as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "skl-tplg-interface.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK		(1 << 0)
#define SKL_RATE_FIXUP_MASK		(1 << 1)
#define SKL_FMT_FIXUP_MASK		(1 << 2)
#define SKL_IN_DIR_BIT_MASK		BIT(0)
#define SKL_PIN_COUNT_MASK		GENMASK(7, 4)
/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether the SKL driver handles a given widget.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
{
	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
		return false;

	default:
		return true;
	}
}
/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * from the available pool.
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
				"%s: module_id %d instance %d\n", __func__,
				mconfig->id.module_id,
				mconfig->id.instance_id);
		dev_err(ctx->dev,
				"exceeds ppl memory available %d mem %d\n",
				skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}
/*
 * Add the mem to the mem pool. This is freed when the pipe is deleted.
 * Note: the DSP does the actual memory management; we only keep track of
 * the overall pool here.
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}
/*
 * A pipeline needs DSP CPU resources for computation, quantified in MCPS
 * (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have enough MCPS
 * for this pipe.
 */
static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mcps %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}
static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mcps += mconfig->mcps;
}

/*
 * Free the mcps when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mcps -= mconfig->mcps;
}

/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}
static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			mcfg->in_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n",
			mcfg->out_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
}
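
/*
 * Build a 32-bit channel-to-slot map: each channel occupies one 4-bit
 * nibble holding its slot number, unused nibbles stay at 0xF. For example,
 * 4 channels starting at slot 0 produce 0xFFFF3210.
 */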
static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}
static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}
}
/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup to apply by supplying a
 * fixup mask, so based on that we calculate the output format.
 *
 * For an FE, the pcm hw_params is the source/target format; the same
 * applies to a BE when its hw_params is invoked.
 * So, based on the FE/BE pipeline and the stream direction, we calculate
 * the input and output fixups and then apply them to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->in_fmt[0];
	out_fmt = &m_cfg->out_fmt[0];

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}
/*
 * A module needs input and output buffers, which depend on the pcm
 * params; so once the params have been calculated, the buffer sizes need
 * to be recalculated as well.
 */
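/*
 * The sizes computed below amount to one millisecond of audio per pin
 * (the rate is rounded up to a whole kHz), times a multiplier of 5 for
 * SKL_MODULE_TYPE_SRCINT modules. For example, 48 kHz, 2 channels and a
 * 32-bit container give 48 * 2 * 4 = 384 bytes.
 */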
static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	int in_rate, out_rate;

	/*
	 * Since the fixups are applied to pin 0 only, ibs and obs need to
	 * change for pin 0 only.
	 */
	in_fmt = &mcfg->in_fmt[0];
	out_fmt = &mcfg->out_fmt[0];

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	if (in_fmt->s_freq % 1000)
		in_rate = (in_fmt->s_freq / 1000) + 1;
	else
		in_rate = (in_fmt->s_freq / 1000);

	mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
			(mcfg->in_fmt->bit_depth >> 3) *
			multiplier;

	if (mcfg->out_fmt->s_freq % 1000)
		out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
	else
		out_rate = (mcfg->out_fmt->s_freq / 1000);

	mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
			(mcfg->out_fmt->bit_depth >> 3) *
			multiplier;
}
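
/*
 * If the topology did not supply an endpoint config blob for a BE copier,
 * look one up in the ACPI NHLT table based on the device type (DMIC/I2S),
 * direction and the widget's default PCM format.
 */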
static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(ctx->dev);

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(ctx->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_cfg->in_fmt[0].s_freq;
		s_fmt = m_cfg->in_fmt[0].bit_depth;
		ch = m_cfg->in_fmt[0].channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_cfg->out_fmt[0].s_freq;
			s_fmt = m_cfg->out_fmt[0].bit_depth;
			ch = m_cfg->out_fmt[0].channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_cfg->in_fmt[0].s_freq;
			s_fmt = m_cfg->in_fmt[0].bit_depth;
			ch = m_cfg->in_fmt[0].channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}
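
/*
 * Apply the topology-declared fixups to the module formats and recompute
 * the input/output buffer sizes, dumping the module config before and
 * after for debugging.
 */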
static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);
}
/*
 * Some modules can have multiple params set from user controls, and those
 * params need to be sent after the module is initialized. If the
 * set_params flag is SKL_PARAM_SET, the params are sent here, after
 * module init.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
/*
 * Some module params can come from a user control and are needed at
 * module init time. Such params are identified by the set_params flag
 * being SKL_PARAM_INIT; they are copied into the init data here so they
 * are sent as part of module init.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}
/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->skl_sst->dev,
					"module %pUL id not populated\n",
					(uuid_le *)mconfig->guid);
			return -EIO;
		}

		/* check resource available */
		if (!skl_is_pipe_mcps_avail(skl, mconfig))
			return -ENOMEM;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;

			mconfig->m_state = SKL_MODULE_LOADED;
		}

		/* if the BE blob is NULL, update it with default values */
		skl_tplg_update_be_blob(w, ctx);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, ctx);
		mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
		if (mconfig->id.pvt_id < 0)
			return mconfig->id.pvt_id;
		skl_tplg_set_module_init_data(w);
		ret = skl_init_module(ctx, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(ctx, mconfig);
			return ret;
		}
		skl_tplg_alloc_pipe_mcps(skl, mconfig);
		ret = skl_tplg_set_module_params(w, ctx);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
						struct skl_pipe *pipe)
{
	int ret;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		mconfig = w_module->w->priv;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(ctx, mconfig);
	}

	/* no modules to unload in this path, so return */
	return 0;
}
/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline, which means doing the following:
 *   - check the resources
 *   - create the pipeline
 *   - initialize the modules in the pipeline
 *   - finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	return 0;
}
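
/*
 * The KPB module's bind params carry module/instance IDs as written in the
 * topology; rewrite each instance ID with the driver's private instance ID
 * before the params are sent to the DSP.
 */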
static int skl_fill_sink_instance_id(struct skl_sst *ctx,
					struct skl_algo_data *alg_data)
{
	struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
	struct skl_mod_inst_map *inst;
	int i, pvt_id;

	inst = params->map;

	for (i = 0; i < params->num_modules; i++) {
		pvt_id = skl_get_pvt_instance_id_map(ctx,
					inst->mod_id, inst->inst_id);
		if (pvt_id < 0)
			return -EINVAL;
		inst->inst_id = pvt_id;
		inst++;
	}
	return 0;
}
/*
 * Some modules require params to be set after the module is bound on all
 * of its connected pins.
 *
 * The module provider sets the set_params flag for such modules, and we
 * send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	/*
	 * check that all out/in pins are in the bind state;
	 * if so, set the module params
	 */
	for (i = 0; i < mcfg->max_out_queue; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->max_in_queue; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				if (mconfig->m_type == SKL_MODULE_TYPE_KPB)
					skl_fill_sink_instance_id(ctx, bc);
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->max,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}
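
/*
 * Walk the DAPM sink paths of a source widget recursively, bind the source
 * module to every connected DSP sink module, send any post-bind params and
 * start the (non-FE) sink pipes before the caller starts its own pipe.
 */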
static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * The widgets in the sink pipeline can be of any type; we
		 * are only interested in the ones handled by the SKL
		 * driver, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
					is_skl_dsp_widget_type(p->sink)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(ctx,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}
/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of a
 * PGA we need to do the following:
 *   - Bind to the sink pipeline
 *     Since the sink pipes can be running and we don't get a mixer event
 *     on connect for an already running mixer, we need to find the sink
 *     pipes here and bind to them. This way dynamic connect works.
 *   - Start the sink pipeline, if not running
 *   - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(ctx, src_mconfig->pipe);

	return 0;
}
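
/*
 * Walk the connected source paths of a widget recursively and return the
 * nearest upstream widget handled by the SKL driver, or NULL if none is
 * found.
 */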
static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;
	struct skl_sst *ctx = skl->skl_sst;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);

		/*
		 * The widgets in the source pipeline can be of any type;
		 * we are only interested in the ones handled by the SKL
		 * driver, so check that first.
		 */
		if ((p->source->priv != NULL) &&
					is_skl_dsp_widget_type(p->source)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}
/*
 * In the Post-PMU event of the mixer we need to do the following:
 *   - Check if this pipe is running
 *   - if not, then
 *	- bind this pipeline to its source pipeline
 *	  if the source pipe is already running, this means it is a
 *	  dynamic connection and we need to bind only to that pipe
 *	- start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, that means the source is
	 * driving one more sink before this sink got connected. Since the
	 * source is started, bind this sink to the source and start this
	 * pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * if the source pipe is not started, there is no need to
		 * bind or start this pipe
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
	}

	return ret;
}
/*
 * In the Pre-PMD event of the mixer we need to do the following:
 *   - Stop the pipe
 *   - find the source connections and remove them from the dapm_path_list
 *   - unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->max_in_queue; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;
			/*
			 * If the pin is still in the BIND_DONE state, the
			 * PMD for the source pipe has not occurred yet and
			 * the source is connected to some other sink, so it
			 * is the sink's responsibility to unbind itself
			 * from the source.
			 */
			ret = skl_stop_pipe(ctx, src_mconfig->pipe);
			if (ret < 0)
				return ret;

			ret = skl_unbind_modules(ctx,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}
/*
 * In the Post-PMD event of the mixer we need to do the following:
 *   - Free the mcps used
 *   - Free the mem used
 *   - Unbind the modules within the pipeline
 *   - Delete the pipeline (modules are not required to be explicitly
 *     deleted, deleting the pipeline is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;
	int ret = 0;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	ret = skl_delete_pipe(ctx, mconfig->pipe);

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}
/*
 * In the Post-PMD event of the PGA we need to do the following:
 *   - Free the mcps used
 *   - Stop the pipeline
 *   - If a source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->max_out_queue; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector; if the pin is still bound,
			 * the unbind between source and sink has not
			 * happened yet.
			 */
			ret = skl_unbind_modules(ctx, src_mconfig,
							sink_mconfig);
		}
	}

	return ret;
}
/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * mixer is not required, it is treated as a static mixer aka vmixer with a
 * hard path to the source module.
 * So we don't need to check if the source is started or not, as the hard
 * path creates the dependency between them.
 */
static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}
/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, that is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}
/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * We are interested in the last PGA (leaf PGA) in a pipeline, in order to
 * disconnect from the sink while it is running (two FEs to one BE, or one
 * FE to two BEs scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}
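
/*
 * TLV byte-control read handler: when the widget is powered, refresh the
 * cached params from the DSP, then copy a {param_id, size, payload} triple
 * to user space, capping the payload at the control's maximum size.
 */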
static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}
#define SKL_PARAM_VENDOR_ID 0xff

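/*
 * TLV byte-control write handler: copy the user payload into the cached
 * params (skipping the TLV header unless the param is of the vendor type)
 * and, if the widget is powered, push the params to the DSP right away.
 */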
static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param_id is of type Vendor, firmware expects the
		 * actual parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}
/*
 * Fill the dma id for host and link. In the case of a passthrough
 * pipeline, the same pipeline contains both the host and the link copier,
 * so copy the link or host DMA id based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}
/*
 * The FE params are passed by hw_params of the DAI.
 * On hw_params, the params are stored in the Gateway module of the FE and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_fmt *format = NULL;

	skl_tplg_fill_dma_id(mconfig, params);

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->in_fmt[0];
	else
		format = &mconfig->out_fmt[0];

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		mconfig->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		mconfig->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}
/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply them to
 * the FE pipeline.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}
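
/*
 * Recursively search the source paths of a BE playback widget for the
 * copier module feeding the AIF output and return its module config; the
 * capture variant below does the same over the sink paths from the AIF
 * input.
 */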
static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}
struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}
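
/*
 * Map the topology device type to the NHLT link type used when looking up
 * the endpoint blob (BT and I2S both go over SSP).
 */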
static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}
/*
 * Fill the BE gateway parameters
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
 * The port can have multiple settings so pick based on the PCM
 * parameters
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id*/
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream);
	if (cfg) {
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				 params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}
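
/*
 * Walk the DAPM graph away from the BE DAI widget (source paths here,
 * sink paths in the capture variant below) until a connected DSP widget
 * with private data is found, then fill its pipe params from the BE
 * hw_params.
 */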
static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}
/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on the direction we need to walk either the source
 * list or the sink list and set the pipeline parameters.
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;
}
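
/*
 * Tables mapping the topology-defined event and byte-control types to the
 * DAPM event handlers and TLV get/set ops implemented above.
 */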
static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};
static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{
	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}
/*
 * Add pipeline by parsing the relevant tokens
 * Return an existing pipe if the pipe already exists.
 */
static int skl_tplg_add_pipe(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl *skl,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
			return EEXIST;
		}
	}

	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;

	return 0;
}
static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
			struct skl_module_pin *m_pin,
			int pin_index, u32 value)
{
	switch (tkn) {
	case SKL_TKN_U32_PIN_MOD_ID:
		m_pin[pin_index].id.module_id = value;
		break;

	case SKL_TKN_U32_PIN_INST_ID:
		m_pin[pin_index].id.instance_id = value;
		break;

	default:
		dev_err(dev, "%d Not a pin token\n", value);
		return -EINVAL;
	}

	return 0;
}
/*
 * Parse for pin config specific tokens to fill up the
 * module private data
 */
static int skl_tplg_fill_pins_info(struct device *dev,
                struct skl_module_cfg *mconfig,
                struct snd_soc_tplg_vendor_value_elem *tkn_elem,
                int dir, int pin_count)
{
        int ret;
        struct skl_module_pin *m_pin;

        switch (dir) {
        case SKL_DIR_IN:
                m_pin = mconfig->m_in_pin;
                break;

        case SKL_DIR_OUT:
                m_pin = mconfig->m_out_pin;
                break;

        default:
                dev_err(dev, "Invalid direction value\n");
                return -EINVAL;
        }

        ret = skl_tplg_fill_pin(dev, tkn_elem->token,
                        m_pin, pin_count, tkn_elem->value);
        if (ret < 0)
                return ret;

        m_pin[pin_count].in_use = false;
        m_pin[pin_count].pin_state = SKL_PIN_UNBIND;

        return 0;
}

/*
 * Fill up input/output module config format based
 * on the direction
 */
static int skl_tplg_fill_fmt(struct device *dev,
                struct skl_module_cfg *mconfig, u32 tkn,
                u32 value, u32 dir, u32 pin_count)
{
        struct skl_module_fmt *dst_fmt;

        switch (dir) {
        case SKL_DIR_IN:
                dst_fmt = mconfig->in_fmt;
                dst_fmt += pin_count;
                break;

        case SKL_DIR_OUT:
                dst_fmt = mconfig->out_fmt;
                dst_fmt += pin_count;
                break;

        default:
                dev_err(dev, "Invalid direction value\n");
                return -EINVAL;
        }

        switch (tkn) {
        case SKL_TKN_U32_FMT_CH:
                dst_fmt->channels = value;
                break;
        case SKL_TKN_U32_FMT_FREQ:
                dst_fmt->s_freq = value;
                break;
        case SKL_TKN_U32_FMT_BIT_DEPTH:
                dst_fmt->bit_depth = value;
                break;
        case SKL_TKN_U32_FMT_SAMPLE_SIZE:
                dst_fmt->valid_bit_depth = value;
                break;
        case SKL_TKN_U32_FMT_CH_CONFIG:
                dst_fmt->ch_cfg = value;
                break;
        case SKL_TKN_U32_FMT_INTERLEAVE:
                dst_fmt->interleaving_style = value;
                break;
        case SKL_TKN_U32_FMT_SAMPLE_TYPE:
                dst_fmt->sample_type = value;
                break;
        case SKL_TKN_U32_FMT_CH_MAP:
                dst_fmt->ch_map = value;
                break;
        default:
                dev_err(dev, "Invalid token %d\n", tkn);
                return -EINVAL;
        }

        return 0;
}

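/* Copy the module UUID from a vendor UUID element into the module config */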
static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
                        struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
{
        if (uuid_tkn->token == SKL_TKN_UUID)
                memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
        else {
                dev_err(dev, "Not a UUID token %d\n", uuid_tkn->token);
                return -EINVAL;
        }

        return 0;
}

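/* Mark all pins of a queue as dynamic or static based on the token value */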
static void skl_tplg_fill_pin_dynamic_val(
                struct skl_module_pin *mpin, u32 pin_count, u32 value)
{
        int i;

        for (i = 0; i < pin_count; i++)
                mpin[i].is_dynamic = value;
}

/*
 * Parse tokens to fill up the module private data
 */
static int skl_tplg_get_token(struct device *dev,
                struct snd_soc_tplg_vendor_value_elem *tkn_elem,
                struct skl *skl, struct skl_module_cfg *mconfig)
{
        int tkn_count = 0;
        int ret;
        static int is_pipe_exists;
        static int pin_index, dir;

        if (tkn_elem->token > SKL_TKN_MAX)
                return -EINVAL;

        switch (tkn_elem->token) {
        case SKL_TKN_U8_IN_QUEUE_COUNT:
                mconfig->max_in_queue = tkn_elem->value;
                mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
                                        sizeof(*mconfig->m_in_pin),
                                        GFP_KERNEL);
                if (!mconfig->m_in_pin)
                        return -ENOMEM;

                break;

        case SKL_TKN_U8_OUT_QUEUE_COUNT:
                mconfig->max_out_queue = tkn_elem->value;
                mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
                                        sizeof(*mconfig->m_out_pin),
                                        GFP_KERNEL);
                if (!mconfig->m_out_pin)
                        return -ENOMEM;

                break;

        case SKL_TKN_U8_DYN_IN_PIN:
                if (!mconfig->m_in_pin)
                        return -ENOMEM;

                skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
                                mconfig->max_in_queue, tkn_elem->value);
                break;

        case SKL_TKN_U8_DYN_OUT_PIN:
                if (!mconfig->m_out_pin)
                        return -ENOMEM;

                skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
                                mconfig->max_out_queue, tkn_elem->value);
                break;

        case SKL_TKN_U8_TIME_SLOT:
                mconfig->time_slot = tkn_elem->value;
                break;

        case SKL_TKN_U8_CORE_ID:
                mconfig->core_id = tkn_elem->value;
                break;

        case SKL_TKN_U8_MOD_TYPE:
                mconfig->m_type = tkn_elem->value;
                break;

        case SKL_TKN_U8_DEV_TYPE:
                mconfig->dev_type = tkn_elem->value;
                break;

        case SKL_TKN_U8_HW_CONN_TYPE:
                mconfig->hw_conn_type = tkn_elem->value;
                break;

        case SKL_TKN_U16_MOD_INST_ID:
                mconfig->id.instance_id = tkn_elem->value;
                break;

        case SKL_TKN_U32_MEM_PAGES:
                mconfig->mem_pages = tkn_elem->value;
                break;

        case SKL_TKN_U32_MAX_MCPS:
                mconfig->mcps = tkn_elem->value;
                break;

        case SKL_TKN_U32_OBS:
                mconfig->obs = tkn_elem->value;
                break;

        case SKL_TKN_U32_IBS:
                mconfig->ibs = tkn_elem->value;
                break;

        case SKL_TKN_U32_VBUS_ID:
                mconfig->vbus_id = tkn_elem->value;
                break;

        case SKL_TKN_U32_PARAMS_FIXUP:
                mconfig->params_fixup = tkn_elem->value;
                break;

        case SKL_TKN_U32_CONVERTER:
                mconfig->converter = tkn_elem->value;
                break;

        case SKL_TKN_U32_PIPE_ID:
                ret = skl_tplg_add_pipe(dev,
                                mconfig, skl, tkn_elem);
                if (ret < 0)
                        return is_pipe_exists;

                if (ret == EEXIST)
                        is_pipe_exists = 1;

                break;

        case SKL_TKN_U32_PIPE_CONN_TYPE:
        case SKL_TKN_U32_PIPE_PRIORITY:
        case SKL_TKN_U32_PIPE_MEM_PGS:
                if (is_pipe_exists) {
                        ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
                                        tkn_elem->token, tkn_elem->value);
                        if (ret < 0)
                                return ret;
                }

                break;

        /*
         * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
         * direction and the pin count. The first four bits represent
         * direction and next four the pin count.
         */
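        /*
         * For example (illustrative only, assuming the SKL_DIR_* values are
         * encoded directly in the low nibble): a token value of
         * (2 << 4) | SKL_DIR_OUT would select output pin index 2 for the
         * format and pin tokens that follow in the vendor array.
         */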
        case SKL_TKN_U32_DIR_PIN_COUNT:
                dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
                pin_index = (tkn_elem->value &
                        SKL_PIN_COUNT_MASK) >> 4;
                break;

        case SKL_TKN_U32_FMT_CH:
        case SKL_TKN_U32_FMT_FREQ:
        case SKL_TKN_U32_FMT_BIT_DEPTH:
        case SKL_TKN_U32_FMT_SAMPLE_SIZE:
        case SKL_TKN_U32_FMT_CH_CONFIG:
        case SKL_TKN_U32_FMT_INTERLEAVE:
        case SKL_TKN_U32_FMT_SAMPLE_TYPE:
        case SKL_TKN_U32_FMT_CH_MAP:
                ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
                                tkn_elem->value, dir, pin_index);
                if (ret < 0)
                        return ret;

                break;

        case SKL_TKN_U32_PIN_MOD_ID:
        case SKL_TKN_U32_PIN_INST_ID:
                ret = skl_tplg_fill_pins_info(dev,
                                mconfig, tkn_elem, dir,
                                pin_index);
                if (ret < 0)
                        return ret;

                break;

        case SKL_TKN_U32_CAPS_SIZE:
                mconfig->formats_config.caps_size = tkn_elem->value;
                break;

        case SKL_TKN_U32_PROC_DOMAIN:
                mconfig->domain = tkn_elem->value;
                break;

        case SKL_TKN_U8_IN_PIN_TYPE:
        case SKL_TKN_U8_OUT_PIN_TYPE:
        case SKL_TKN_U8_CONN_TYPE:
                break;

        default:
                dev_err(dev, "Token %d not handled\n",
                                tkn_elem->token);
                return -EINVAL;
        }

        tkn_count++;

        return tkn_count;
}

/*
 * Parse the vendor array for specific tokens to construct
 * module private data
 */
static int skl_tplg_get_tokens(struct device *dev,
                char *pvt_data, struct skl *skl,
                struct skl_module_cfg *mconfig, int block_size)
{
        struct snd_soc_tplg_vendor_array *array;
        struct snd_soc_tplg_vendor_value_elem *tkn_elem;
        int tkn_count = 0, ret;
        int off = 0, tuple_size = 0;

        if (block_size <= 0)
                return -EINVAL;

        while (tuple_size < block_size) {
                array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);

                off += array->size;

                switch (array->type) {
                case SND_SOC_TPLG_TUPLE_TYPE_STRING:
                        dev_warn(dev, "no string tokens expected for skl tplg\n");
                        continue;

                case SND_SOC_TPLG_TUPLE_TYPE_UUID:
                        ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
                        if (ret < 0)
                                return ret;

                        tuple_size += sizeof(*array->uuid);
                        continue;

                default:
                        tkn_elem = array->value;
                        tkn_count = 0;
                        break;
                }

                while (tkn_count <= (array->num_elems - 1)) {
                        ret = skl_tplg_get_token(dev, tkn_elem,
                                                skl, mconfig);
                        if (ret < 0)
                                return ret;

                        tkn_count = tkn_count + ret;
                        tkn_elem++;
                }

                tuple_size += tkn_count * sizeof(*tkn_elem);
        }

        return 0;
}

/*
 * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size
 */
static int skl_tplg_get_desc_blocks(struct device *dev,
                struct snd_soc_tplg_vendor_array *array)
{
        struct snd_soc_tplg_vendor_value_elem *tkn_elem;

        tkn_elem = array->value;

        switch (tkn_elem->token) {
        case SKL_TKN_U8_NUM_BLOCKS:
        case SKL_TKN_U8_BLOCK_TYPE:
        case SKL_TKN_U16_BLOCK_SIZE:
                return tkn_elem->value;

        default:
                dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
                break;
        }

        return -EINVAL;
}

/*
 * Parse the private data for the token and corresponding value.
 * The private data can have multiple data blocks. So, a data block
 * is preceded by a descriptor for the number of blocks and a descriptor
 * for the type and size of the succeeding data block.
 */
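/*
 * A typical private data region therefore looks like this (sketch):
 *
 *   [NUM_DATA_BLOCKS = N]
 *   [BLOCK_TYPE][BLOCK_SIZE][block payload]    (repeated N times)
 *
 * A SKL_TYPE_TUPLE payload is parsed by skl_tplg_get_tokens(); any other
 * block type is copied verbatim into formats_config.caps.
 */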
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
                                struct skl *skl, struct device *dev,
                                struct skl_module_cfg *mconfig)
{
        struct snd_soc_tplg_vendor_array *array;
        int num_blocks, block_size = 0, block_type, off = 0;
        char *data;
        int ret;

        /* Read the NUM_DATA_BLOCKS descriptor */
        array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
        ret = skl_tplg_get_desc_blocks(dev, array);
        if (ret < 0)
                return ret;
        num_blocks = ret;

        off += array->size;
        array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);

        /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
        while (num_blocks > 0) {
                ret = skl_tplg_get_desc_blocks(dev, array);
                if (ret < 0)
                        return ret;
                block_type = ret;

                off += array->size;
                array = (struct snd_soc_tplg_vendor_array *)
                        (tplg_w->priv.data + off);

                ret = skl_tplg_get_desc_blocks(dev, array);
                if (ret < 0)
                        return ret;
                block_size = ret;

                off += array->size;
                array = (struct snd_soc_tplg_vendor_array *)
                        (tplg_w->priv.data + off);

                data = (tplg_w->priv.data + off);

                if (block_type == SKL_TYPE_TUPLE) {
                        ret = skl_tplg_get_tokens(dev, data,
                                        skl, mconfig, block_size);
                        if (ret < 0)
                                return ret;

                        --num_blocks;
                } else {
                        if (mconfig->formats_config.caps_size > 0)
                                memcpy(mconfig->formats_config.caps, data,
                                        mconfig->formats_config.caps_size);
                        --num_blocks;
                }
        }

        return 0;
}

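/*
 * Reset the pin usage and pipe/module state of a widget owned by this
 * platform so its resources can be reused for the next use case.
 */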
static void skl_clear_pin_config(struct snd_soc_platform *platform,
                                struct snd_soc_dapm_widget *w)
{
        int i;
        struct skl_module_cfg *mconfig;
        struct skl_pipe *pipe;

        if (!strncmp(w->dapm->component->name, platform->component.name,
                                        strlen(platform->component.name))) {
                mconfig = w->priv;
                pipe = mconfig->pipe;

                for (i = 0; i < mconfig->max_in_queue; i++) {
                        mconfig->m_in_pin[i].in_use = false;
                        mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
                }

                for (i = 0; i < mconfig->max_out_queue; i++) {
                        mconfig->m_out_pin[i].in_use = false;
                        mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
                }

                pipe->state = SKL_PIPE_INVALID;
                mconfig->m_state = SKL_MODULE_UNINIT;
        }
}

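/*
 * Reset the driver's resource accounting (memory and MCPS) and clear the
 * pin configuration of all SKL DSP widgets on the card.
 */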
void skl_cleanup_resources(struct skl *skl)
{
        struct skl_sst *ctx = skl->skl_sst;
        struct snd_soc_platform *soc_platform = skl->platform;
        struct snd_soc_dapm_widget *w;
        struct snd_soc_card *card;

        if (soc_platform == NULL)
                return;

        card = soc_platform->component.card;
        if (!card || !card->instantiated)
                return;

        skl->resource.mem = 0;
        skl->resource.mcps = 0;

        list_for_each_entry(w, &card->widgets, list) {
                if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
                        skl_clear_pin_config(soc_platform, w);
        }

        skl_clear_module_cnt(ctx->dsp);
}

/*
 * Topology core widget load callback
 *
 * This is used to save the private data for each widget, which gives the
 * driver information about the module and pipeline parameters the DSP FW
 * expects, such as ids, resource values, formats etc.
 */
static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
                                struct snd_soc_dapm_widget *w,
                                struct snd_soc_tplg_dapm_widget *tplg_w)
{
        int ret;
        struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
        struct skl *skl = ebus_to_skl(ebus);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct skl_module_cfg *mconfig;

        if (!tplg_w->priv.size)
                goto bind_event;

        mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
        if (!mconfig)
                return -ENOMEM;

        w->priv = mconfig;

        /*
         * The module binary can be loaded later, so set it to query when
         * the module is loaded for a use case
         */
        mconfig->id.module_id = -1;

        /* Parse private data for tuples */
        ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
        if (ret < 0)
                return ret;

bind_event:
        if (tplg_w->event_type == 0) {
                dev_dbg(bus->dev, "ASoC: No event handler required\n");
                return 0;
        }

        ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
                                        ARRAY_SIZE(skl_tplg_widget_ops),
                                        tplg_w->event_type);
        if (ret) {
                dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
                                        __func__, tplg_w->event_type);
                return -EINVAL;
        }

        return 0;
}

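/*
 * Copy the algorithm parameters from the bytes control private data in the
 * topology into a skl_algo_data object attached to the control.
 */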
static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
                                struct snd_soc_tplg_bytes_control *bc)
{
        struct skl_algo_data *ac;
        struct skl_dfw_algo_data *dfw_ac =
                                (struct skl_dfw_algo_data *)bc->priv.data;

        ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
        if (!ac)
                return -ENOMEM;

        /* Fill private data */
        ac->max = dfw_ac->max;
        ac->param_id = dfw_ac->param_id;
        ac->set_params = dfw_ac->set_params;
        ac->size = dfw_ac->max;

        if (ac->max) {
                ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
                if (!ac->params)
                        return -ENOMEM;

                memcpy(ac->params, dfw_ac->params, ac->max);
        }

        be->dobj.private = ac;
        return 0;
}

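/* Topology core control load callback: set up TLV bytes controls */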
static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
                                struct snd_kcontrol_new *kctl,
                                struct snd_soc_tplg_ctl_hdr *hdr)
{
        struct soc_bytes_ext *sb;
        struct snd_soc_tplg_bytes_control *tplg_bc;
        struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
        struct hdac_bus *bus = ebus_to_hbus(ebus);

        switch (hdr->ops.info) {
        case SND_SOC_TPLG_CTL_BYTES:
                tplg_bc = container_of(hdr,
                                struct snd_soc_tplg_bytes_control, hdr);
                if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
                        sb = (struct soc_bytes_ext *)kctl->private_value;
                        if (tplg_bc->priv.size)
                                return skl_init_algo_data(
                                                bus->dev, sb, tplg_bc);
                }
                break;

        default:
                dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
                        hdr->ops.get, hdr->ops.put, hdr->ops.info);
                break;
        }

        return 0;
}

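/* Parse a manifest string token (library name) into the firmware manifest */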
static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
                struct snd_soc_tplg_vendor_string_elem *str_elem,
                struct skl_dfw_manifest *minfo)
{
        int tkn_count = 0;
        static int ref_count;

        switch (str_elem->token) {
        case SKL_TKN_STR_LIB_NAME:
                if (ref_count > minfo->lib_count - 1) {
                        ref_count = 0;
                        return -EINVAL;
                }

                strncpy(minfo->lib[ref_count].name, str_elem->string,
                                ARRAY_SIZE(minfo->lib[ref_count].name));
                ref_count++;
                tkn_count++;
                break;

        default:
                dev_err(dev, "Not a string token %d\n", str_elem->token);
                break;
        }

        return tkn_count;
}

static int skl_tplg_get_str_tkn(struct device *dev,
                struct snd_soc_tplg_vendor_array *array,
                struct skl_dfw_manifest *minfo)
{
        int tkn_count = 0, ret;
        struct snd_soc_tplg_vendor_string_elem *str_elem;

        str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
        while (tkn_count < array->num_elems) {
                ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, minfo);
                str_elem++;
                if (ret < 0)
                        return ret;

                tkn_count = tkn_count + ret;
        }

        return tkn_count;
}

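/* Parse an integer manifest token (currently only the library count) */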
static int skl_tplg_get_int_tkn(struct device *dev,
                struct snd_soc_tplg_vendor_value_elem *tkn_elem,
                struct skl_dfw_manifest *minfo)
{
        int tkn_count = 0;

        switch (tkn_elem->token) {
        case SKL_TKN_U32_LIB_COUNT:
                minfo->lib_count = tkn_elem->value;
                tkn_count++;
                break;

        default:
                dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
                return -EINVAL;
        }

        return tkn_count;
}

/*
 * Fill the manifest structure by parsing the tokens based on the
 * type.
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
                char *pvt_data, struct skl_dfw_manifest *minfo,
                int block_size)
{
        int tkn_count = 0, ret;
        int off = 0, tuple_size = 0;
        struct snd_soc_tplg_vendor_array *array;
        struct snd_soc_tplg_vendor_value_elem *tkn_elem;

        if (block_size <= 0)
                return -EINVAL;

        while (tuple_size < block_size) {
                array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
                off += array->size;

                switch (array->type) {
                case SND_SOC_TPLG_TUPLE_TYPE_STRING:
                        ret = skl_tplg_get_str_tkn(dev, array, minfo);
                        if (ret < 0)
                                return ret;

                        tkn_count += ret;
                        tuple_size += tkn_count *
                                sizeof(struct snd_soc_tplg_vendor_string_elem);
                        continue;

                case SND_SOC_TPLG_TUPLE_TYPE_UUID:
                        dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
                        continue;

                default:
                        tkn_elem = array->value;
                        tkn_count = 0;
                        break;
                }

                while (tkn_count <= array->num_elems - 1) {
                        ret = skl_tplg_get_int_tkn(dev,
                                        tkn_elem, minfo);
                        if (ret < 0)
                                return ret;

                        tkn_count = tkn_count + ret;
                        tkn_elem++;
                        tuple_size += tkn_count *
                                sizeof(struct snd_soc_tplg_vendor_value_elem);
                        break;
                }

                tkn_count = 0;
        }

        return 0;
}

/*
 * Parse manifest private data for tokens. The private data block is
 * preceded by descriptors for type and size of data block.
 */
static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
                        struct device *dev, struct skl_dfw_manifest *minfo)
{
        struct snd_soc_tplg_vendor_array *array;
        int num_blocks, block_size = 0, block_type, off = 0;
        char *data;
        int ret;

        /* Read the NUM_DATA_BLOCKS descriptor */
        array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
        ret = skl_tplg_get_desc_blocks(dev, array);
        if (ret < 0)
                return ret;
        num_blocks = ret;

        off += array->size;
        array = (struct snd_soc_tplg_vendor_array *)
                        (manifest->priv.data + off);

        /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
        while (num_blocks > 0) {
                ret = skl_tplg_get_desc_blocks(dev, array);
                if (ret < 0)
                        return ret;
                block_type = ret;

                off += array->size;
                array = (struct snd_soc_tplg_vendor_array *)
                        (manifest->priv.data + off);

                ret = skl_tplg_get_desc_blocks(dev, array);
                if (ret < 0)
                        return ret;
                block_size = ret;

                off += array->size;
                array = (struct snd_soc_tplg_vendor_array *)
                        (manifest->priv.data + off);

                data = (manifest->priv.data + off);

                if (block_type == SKL_TYPE_TUPLE) {
                        ret = skl_tplg_get_manifest_tkn(dev, data, minfo,
                                        block_size);
                        if (ret < 0)
                                return ret;

                        --num_blocks;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}

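/* Topology core manifest load callback: parse DSP library information */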
static int skl_manifest_load(struct snd_soc_component *cmpnt,
                                struct snd_soc_tplg_manifest *manifest)
{
        struct skl_dfw_manifest *minfo;
        struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct skl *skl = ebus_to_skl(ebus);
        int ret = 0;

        /* proceed only if we have private data defined */
        if (manifest->priv.size == 0)
                return 0;

        minfo = &skl->skl_sst->manifest;

        skl_tplg_get_manifest_data(manifest, bus->dev, minfo);

        if (minfo->lib_count > HDA_MAX_LIB) {
                dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
                                        minfo->lib_count);
                ret = -EINVAL;
        }

        return ret;
}

static struct snd_soc_tplg_ops skl_tplg_ops = {
        .widget_load = skl_tplg_widget_load,
        .control_load = skl_tplg_control_load,
        .bytes_ext_ops = skl_tlv_ops,
        .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
        .manifest = skl_manifest_load,
};

/*
 * A pipe can have multiple modules, each of them will be a DAPM widget as
 * well. While managing a pipeline we need the list of all the widgets in
 * that pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
 * collects the SKL DSP widgets belonging to each pipeline.
 */
static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
{
        struct snd_soc_dapm_widget *w;
        struct skl_module_cfg *mcfg = NULL;
        struct skl_pipe_module *p_module = NULL;
        struct skl_pipe *pipe;

        list_for_each_entry(w, &platform->component.card->widgets, list) {
                if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
                        mcfg = w->priv;
                        pipe = mcfg->pipe;

                        p_module = devm_kzalloc(platform->dev,
                                        sizeof(*p_module), GFP_KERNEL);
                        if (!p_module)
                                return -ENOMEM;

                        p_module->w = w;
                        list_add_tail(&p_module->node, &pipe->w_list);
                }
        }

        return 0;
}

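/*
 * A pipe that contains both a host DMA (HDA host) module and a link module
 * is treated as a pass-through pipe.
 */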
static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
{
        struct skl_pipe_module *w_module;
        struct snd_soc_dapm_widget *w;
        struct skl_module_cfg *mconfig;
        bool host_found = false, link_found = false;

        list_for_each_entry(w_module, &pipe->w_list, node) {
                w = w_module->w;
                mconfig = w->priv;

                if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
                        host_found = true;
                else if (mconfig->dev_type != SKL_DEVICE_NONE)
                        link_found = true;
        }

        if (host_found && link_found)
                pipe->passthru = true;
        else
                pipe->passthru = false;
}

/* This will be read from topology manifest, currently defined here */
#define SKL_MAX_MCPS 30000000
#define SKL_FW_MAX_MEM 1000000

/*
 * SKL topology init routine
 */
int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
{
        int ret;
        const struct firmware *fw;
        struct hdac_bus *bus = ebus_to_hbus(ebus);
        struct skl *skl = ebus_to_skl(ebus);
        struct skl_pipeline *ppl;

        ret = request_firmware(&fw, skl->tplg_name, bus->dev);
        if (ret < 0) {
                dev_err(bus->dev, "tplg fw %s load failed with %d\n",
                                skl->tplg_name, ret);
                ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
                if (ret < 0) {
                        dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
                                        "dfw_sst.bin", ret);
                        return ret;
                }
        }

        /*
         * The complete tplg for SKL is loaded as index 0, we don't use
         * any other index
         */
        ret = snd_soc_tplg_component_load(&platform->component,
                                        &skl_tplg_ops, fw, 0);
        if (ret < 0) {
                dev_err(bus->dev, "tplg component load failed %d\n", ret);
                release_firmware(fw);
                return -EINVAL;
        }

        skl->resource.max_mcps = SKL_MAX_MCPS;
        skl->resource.max_mem = SKL_FW_MAX_MEM;

        skl->tplg = fw;

        ret = skl_tplg_create_pipe_widget_list(platform);
        if (ret < 0)
                return ret;

        list_for_each_entry(ppl, &skl->ppl_list, node)
                skl_tplg_set_pipe_type(skl, ppl->pipe);

        return 0;
}