/*
 *  skl-messages.c - HDA DSP interface for FW registration, Pipe and Module
 *  configurations
 *
 *  Copyright (C) 2015 Intel Corp
 *  Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	    Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-topology.h"
#include "skl-tplg-interface.h"
static int skl_alloc_dma_buf(struct device *dev,
		struct snd_dma_buffer *dmab, size_t size)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
}

static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	bus->io_ops->dma_free_pages(bus, dmab);

	return 0;
}

#define NOTIFICATION_PARAM_ID 3
#define NOTIFICATION_MASK 0xf

/* Enable/disable underrun and overrun notifications from the firmware module */
static void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
{
	struct notification_mask mask;
	struct skl_ipc_large_config_msg msg = {0};

	mask.notify = NOTIFICATION_MASK;
	mask.enable = enable;

	msg.large_param_id = NOTIFICATION_PARAM_ID;
	msg.param_data_size = sizeof(mask);

	skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
}
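
/*
 * Program the SPIB (software position in buffer) register for the host
 * DMA stream used to download code to the DSP.
 */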
static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
				int stream_tag, int enable)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct hdac_stream *stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	struct hdac_ext_stream *estream;

	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);

	/* enable/disable SPIB for this hdac stream */
	snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);

	/* set the spib value */
	snd_hdac_ext_stream_set_spib(ebus, estream, size);

	return 0;
}
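
/*
 * Assign a decoupled host DMA stream, prepare it for code loading and
 * return the stream tag on success.
 */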
static int skl_dsp_prepare(struct device *dev, unsigned int format,
			unsigned int size, struct snd_dma_buffer *dmab)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct hdac_ext_stream *estream;
	struct hdac_stream *stream;
	struct snd_pcm_substream substream;
	int ret;

	if (!bus)
		return -ENODEV;

	memset(&substream, 0, sizeof(substream));
	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;

	estream = snd_hdac_ext_stream_assign(ebus, &substream,
					HDAC_EXT_STREAM_TYPE_HOST);
	if (!estream)
		return -ENODEV;

	stream = hdac_stream(estream);

	/* assign decouple host dma channel */
	ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
	if (ret < 0)
		return ret;

	skl_dsp_setup_spib(dev, size, stream->stream_tag, true);

	return stream->stream_tag;
}
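
/* Start or stop the host DMA stream identified by stream_tag. */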
static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	snd_hdac_dsp_trigger(stream, start);

	return 0;
}
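
/* Release the host DMA stream and the buffer used for code loading. */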
static int skl_dsp_cleanup(struct device *dev,
		struct snd_dma_buffer *dmab, int stream_tag)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_ext_stream *estream;
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);
	skl_dsp_setup_spib(dev, 0, stream_tag, false);
	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);

	snd_hdac_dsp_cleanup(stream, dmab);

	return 0;
}
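
/*
 * Loader ops: Skylake only needs DMA buffer alloc/free, while Broxton
 * additionally loads code over a host DMA stream and therefore needs
 * the prepare/trigger/cleanup callbacks.
 */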
static struct skl_dsp_loader_ops skl_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;

	return loader_ops;
}

static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;
	loader_ops.prepare = skl_dsp_prepare;
	loader_ops.trigger = skl_dsp_trigger;
	loader_ops.cleanup = skl_dsp_cleanup;

	return loader_ops;
}
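
/*
 * Platform descriptors keyed by PCI device ID: 0x9d70 (Skylake-LP),
 * 0x9d71 (Kabylake-LP) and 0x5a98 (Broxton-P).
 */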
static const struct skl_dsp_ops dsp_ops[] = {
	{
		.id = 0x9d70,
		.loader_ops = skl_get_loader_ops,
		.init = skl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x9d71,
		.loader_ops = skl_get_loader_ops,
		.init = skl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x5a98,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
};
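
/* Look up the platform DSP ops matching the given PCI device ID. */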
const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
		if (dsp_ops[i].id == pci_id)
			return &dsp_ops[i];
	}

	return NULL;
}
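
/*
 * Enable the processing pipe capability and its interrupt, map the ADSP
 * MMIO BAR and register the platform DSP.
 */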
int skl_init_dsp(struct skl *skl)
{
	void __iomem *mmio_base;
	struct hdac_ext_bus *ebus = &skl->ebus;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_dsp_loader_ops loader_ops;
	int irq = bus->irq;
	const struct skl_dsp_ops *ops;
	int ret;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

	/* read the BAR of the ADSP MMIO */
	mmio_base = pci_ioremap_bar(skl->pci, 4);
	if (mmio_base == NULL) {
		dev_err(bus->dev, "ioremap error\n");
		return -ENXIO;
	}

	ops = skl_get_dsp_ops(skl->pci->device);
	if (!ops)
		return -EIO;

	loader_ops = ops->loader_ops();
	ret = ops->init(bus->dev, mmio_base, irq,
				skl->fw_name, loader_ops,
				&skl->skl_sst);
	if (ret < 0)
		return ret;

	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);

	return ret;
}
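
/* Tear down the DSP context and unmap the ADSP MMIO space. */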
int skl_free_dsp(struct skl *skl)
{
	struct hdac_ext_bus *ebus = &skl->ebus;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_sst *ctx = skl->skl_sst;
	const struct skl_dsp_ops *ops;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);

	ops = skl_get_dsp_ops(skl->pci->device);
	if (!ops)
		return -EIO;

	ops->cleanup(bus->dev, ctx);

	if (ctx->dsp->addr.lpe)
		iounmap(ctx->dsp->addr.lpe);

	return 0;
}
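
/* Put the DSP to sleep and disable the processing pipe capability. */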
int skl_suspend_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	/* if ppcap is not supported return 0 */
	if (!skl->ebus.bus.ppcap)
		return 0;

	ret = skl_dsp_sleep(ctx->dsp);
	if (ret < 0)
		return ret;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);

	return 0;
}
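
/*
 * Re-enable the processing pipe capability and wake the DSP, unless the
 * first firmware boot has not completed yet.
 */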
int skl_resume_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	/* if ppcap is not supported return 0 */
	if (!skl->ebus.bus.ppcap)
		return 0;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

	/* check if DSP 1st boot is done */
	if (skl->skl_sst->is_first_boot == true)
		return 0;

	ret = skl_dsp_wake(ctx->dsp);
	if (ret < 0)
		return ret;

	skl_dsp_enable_notification(skl->skl_sst, false);

	return ret;
}
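
/* Map a PCM sample width in bits to the firmware bit-depth encoding. */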
enum skl_bitdepth skl_get_bit_depth(int params)
{
	switch (params) {
	case 8:
		return SKL_DEPTH_8BIT;

	case 16:
		return SKL_DEPTH_16BIT;

	case 24:
		return SKL_DEPTH_24BIT;

	case 32:
		return SKL_DEPTH_32BIT;

	default:
		return SKL_DEPTH_INVALID;
	}
}

/*
 * Each module in the DSP expects a base module configuration, which consists
 * of the PCM format information calculated in the driver and resource values
 * read from the widget information passed through the topology binary.
 * This is sent when we create a module with the INIT_INSTANCE IPC message.
 */
static void skl_set_base_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_cfg *base_cfg)
{
	struct skl_module_fmt *format = &mconfig->in_fmt[0];

	base_cfg->audio_fmt.number_of_channels = (u8)format->channels;

	base_cfg->audio_fmt.s_freq = format->s_freq;
	base_cfg->audio_fmt.bit_depth = format->bit_depth;
	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;

	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
			format->bit_depth, format->valid_bit_depth,
			format->ch_cfg);

	base_cfg->audio_fmt.channel_map = format->ch_map;
	base_cfg->audio_fmt.interleaving = format->interleaving_style;

	base_cfg->cps = mconfig->mcps;
	base_cfg->ibs = mconfig->ibs;
	base_cfg->obs = mconfig->obs;
	base_cfg->is_pages = mconfig->mem_pages;
}

/*
 * Copies copier capabilities into the copier module and updates the copier
 * module config size.
 */
static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
				struct skl_cpr_cfg *cpr_mconfig)
{
	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(cpr_mconfig->gtw_cfg.config_data,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);

	cpr_mconfig->gtw_cfg.config_length =
			(mconfig->formats_config.caps_size) / 4;
}

#define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF

/*
 * Calculate the gateway settings required for the copier module: the type
 * of gateway and the index of the gateway to use.
 */
static u32 skl_get_node_id(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	union skl_connector_node_id node_id = {0};
	union skl_ssp_dma_node ssp_node = {0};
	struct skl_pipe_params *params = mconfig->pipe->p_params;

	switch (mconfig->dev_type) {
	case SKL_DEVICE_BT:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id +
					(mconfig->vbus_id << 3);
		break;

	case SKL_DEVICE_I2S:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		ssp_node.dma_node.time_slot_index = mconfig->time_slot;
		ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
		node_id.node.vindex = ssp_node.val;
		break;

	case SKL_DEVICE_DMIC:
		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
		node_id.node.vindex = mconfig->vbus_id +
					(mconfig->time_slot);
		break;

	case SKL_DEVICE_HDALINK:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
			SKL_DMA_HDA_LINK_INPUT_CLASS;
		node_id.node.vindex = params->link_dma_id;
		break;

	case SKL_DEVICE_HDAHOST:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
			SKL_DMA_HDA_HOST_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id;
		break;

	default:
		node_id.val = 0xFFFFFFFF;
		break;
	}

	return node_id.val;
}
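
/*
 * Fill the copier gateway configuration: node id, DMA buffer size and any
 * vendor blob passed down from topology.
 */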
static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);

	if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
		cpr_mconfig->cpr_feature_mask = 0;
		return;
	}

	if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
		cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
	else
		cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;

	cpr_mconfig->cpr_feature_mask = 0;
	cpr_mconfig->gtw_cfg.config_length = 0;

	skl_copy_copier_caps(mconfig, cpr_mconfig);
}
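
/*
 * DMA_CONTROL_ID: large config parameter used to pass the I2S DMA control
 * blob from topology down to the firmware.
 */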
#define DMA_CONTROL_ID 5

int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
{
	struct skl_dma_control *dma_ctrl;
	struct skl_i2s_config_blob config_blob;
	struct skl_ipc_large_config_msg msg = {0};
	int err = 0;

	/*
	 * If the blob size is the same as the capability size, then no DMA
	 * control is present, so return.
	 */
	if (mconfig->formats_config.caps_size == sizeof(config_blob))
		return 0;

	msg.large_param_id = DMA_CONTROL_ID;
	msg.param_data_size = sizeof(struct skl_dma_control) +
				mconfig->formats_config.caps_size;

	dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
	if (dma_ctrl == NULL)
		return -ENOMEM;

	dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);

	/* size in dwords */
	dma_ctrl->config_length = sizeof(config_blob) / 4;

	memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
				mconfig->formats_config.caps_size);

	err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);

	kfree(dma_ctrl);

	return err;
}
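
/* Fill the module output audio format from the first output pin format. */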
static void skl_setup_out_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_audio_data_format *out_fmt)
{
	struct skl_module_fmt *format = &mconfig->out_fmt[0];

	out_fmt->number_of_channels = (u8)format->channels;
	out_fmt->s_freq = format->s_freq;
	out_fmt->bit_depth = format->bit_depth;
	out_fmt->valid_bit_depth = format->valid_bit_depth;
	out_fmt->ch_cfg = format->ch_cfg;

	out_fmt->channel_map = format->ch_map;
	out_fmt->interleaving = format->interleaving_style;
	out_fmt->sample_type = format->sample_type;

	dev_dbg(ctx->dev, "copier out format chan=%d freq=%d bitdepth=%d\n",
		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
}

/*
 * The DSP needs the SRC module for frequency conversion. SRC takes the base
 * module configuration plus the target frequency, passed as the src config.
 */
static void skl_set_src_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_src_module_cfg *src_mconfig)
{
	struct skl_module_fmt *fmt = &mconfig->out_fmt[0];

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)src_mconfig);

	src_mconfig->src_cfg = fmt->s_freq;
}

/*
 * The DSP needs the updown module for channel conversion. The updown module
 * takes the base module configuration and the channel configuration. It also
 * takes coefficients; for now the firmware defaults are applied here.
 */
static void skl_set_updown_mixer_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_up_down_mixer_cfg *mixer_mconfig)
{
	struct skl_module_fmt *fmt = &mconfig->out_fmt[0];
	int i = 0;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)mixer_mconfig);
	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;

	/* Select F/W default coefficient */
	mixer_mconfig->coeff_sel = 0x0;

	/* User coeff, don't care since we are selecting F/W defaults */
	for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
		mixer_mconfig->coeff[i] = 0xDEADBEEF;
}

/*
 * 'copier' is a DSP-internal module which copies data from the host DMA
 * (HDA host DMA) or a link (HDA link, SSP, PDM).
 * Here we calculate the copier module parameters, such as the PCM format,
 * output format and gateway settings.
 * copier_module_config is sent as the input buffer with the INIT_INSTANCE
 * IPC message.
 */
static void skl_set_copier_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	skl_setup_out_format(ctx, mconfig, out_fmt);
	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
}

/*
 * Algo modules are DSP pre-processing modules. An algo module takes the base
 * module configuration and its parameters.
 */
static void skl_set_algo_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_algo_cfg *algo_mcfg)
{
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(algo_mcfg->params,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);
}

/*
 * The mic select module allows selecting one or more input channels, thus
 * acting as a demux.
 *
 * The mic select module takes the base module configuration and the
 * out-format configuration.
 */
static void skl_set_base_outfmt_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_outfmt_cfg *base_outfmt_mcfg)
{
	struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
	struct skl_base_cfg *base_cfg =
				(struct skl_base_cfg *)base_outfmt_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);
	skl_setup_out_format(ctx, mconfig, out_fmt);
}
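
/* Compute the payload size sent with INIT_INSTANCE for each module type. */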
static u16 skl_get_module_param_size(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 param_size;

	switch (mconfig->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		param_size = sizeof(struct skl_cpr_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_SRCINT:
		return sizeof(struct skl_src_module_cfg);

	case SKL_MODULE_TYPE_UPDWMIX:
		return sizeof(struct skl_up_down_mixer_cfg);

	case SKL_MODULE_TYPE_ALGO:
		param_size = sizeof(struct skl_base_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_KPB:
		return sizeof(struct skl_base_outfmt_cfg);

	default:
		/*
		 * return only base cfg when no specific module type is
		 * specified
		 */
		return sizeof(struct skl_base_cfg);
	}
}

/*
 * The DSP firmware supports various modules such as copier, SRC and updown.
 * These modules require various parameters to be calculated and sent to the
 * DSP for module initialization. By default a generic module needs only the
 * base module format configuration.
 */
static int skl_set_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *module_config,
			u16 *module_config_size,
			void **param_data)
{
	u16 param_size;

	param_size = skl_get_module_param_size(ctx, module_config);

	*param_data = kzalloc(param_size, GFP_KERNEL);
	if (*param_data == NULL)
		return -ENOMEM;

	*module_config_size = param_size;

	switch (module_config->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		skl_set_copier_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_SRCINT:
		skl_set_src_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_UPDWMIX:
		skl_set_updown_mixer_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_ALGO:
		skl_set_algo_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_KPB:
		skl_set_base_outfmt_format(ctx, module_config, *param_data);
		break;

	default:
		skl_set_base_module_format(ctx, module_config, *param_data);
		break;
	}

	dev_dbg(ctx->dev, "Module id=%d config size: %d bytes\n",
			module_config->id.module_id, param_size);
	print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
			*param_data, param_size, false);

	return 0;
}
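
/* Find the pin index already associated with the given module instance. */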
static int skl_get_queue_index(struct skl_module_pin *mpin,
			struct skl_module_inst_id id, int max)
{
	int i;

	for (i = 0; i < max; i++) {
		if (mpin[i].id.module_id == id.module_id &&
			mpin[i].id.instance_id == id.instance_id)
			return i;
	}

	return -EINVAL;
}

/*
 * Allocates a queue for each module.
 * If the pin is dynamic, the pin_index is allocated from 0 to max_pin.
 * If static, the pin_index is fixed based on module_id and instance id.
 */
static int skl_alloc_queue(struct skl_module_pin *mpin,
			struct skl_module_cfg *tgt_cfg, int max)
{
	int i;
	struct skl_module_inst_id id = tgt_cfg->id;

	/*
	 * If the pin is dynamic, find the first free pin; otherwise find the
	 * pin matching the module and instance id, as topology will ensure a
	 * unique pin is assigned to it, so there is no need to allocate/free.
	 */
	for (i = 0; i < max; i++) {
		if (mpin[i].is_dynamic) {
			if (!mpin[i].in_use &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].in_use = true;
				mpin[i].id.module_id = id.module_id;
				mpin[i].id.instance_id = id.instance_id;
				mpin[i].id.pvt_id = id.pvt_id;
				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		} else {
			if (mpin[i].id.module_id == id.module_id &&
				mpin[i].id.instance_id == id.instance_id &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		}
	}

	return -EINVAL;
}
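
/* Mark a pin as unbound and, for dynamic pins, make it available again. */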
static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
{
	if (mpin[q_index].is_dynamic) {
		mpin[q_index].in_use = false;
		mpin[q_index].id.module_id = 0;
		mpin[q_index].id.instance_id = 0;
		mpin[q_index].id.pvt_id = 0;
	}
	mpin[q_index].pin_state = SKL_PIN_UNBIND;
	mpin[q_index].tgt_mcfg = NULL;
}

/* Module state will be set to uninit if all the out pin states are UNBIND */
static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
						struct skl_module_cfg *mcfg)
{
	int i;
	bool found = false;

	for (i = 0; i < max; i++) {
		if (mpin[i].pin_state == SKL_PIN_UNBIND)
			continue;
		found = true;
		break;
	}

	if (!found)
		mcfg->m_state = SKL_MODULE_UNINIT;
}

/*
 * A module needs to be instantiated in the DSP. A module is part of a
 * collection of modules referred to as a PIPE.
 * We first calculate the module format, based on the module type, and then
 * invoke the DSP by sending the IPC INIT_INSTANCE message using the IPC
 * helper.
 */
int skl_init_module(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 module_config_size = 0;
	void *param_data = NULL;
	int ret;
	struct skl_ipc_init_instance_msg msg;

	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
		 mconfig->id.module_id, mconfig->id.pvt_id);

	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
		dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
				 mconfig->pipe->state, mconfig->pipe->ppl_id);
		return -EIO;
	}

	ret = skl_set_module_format(ctx, mconfig,
			&module_config_size, &param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
		return ret;
	}

	msg.module_id = mconfig->id.module_id;
	msg.instance_id = mconfig->id.pvt_id;
	msg.ppl_instance_id = mconfig->pipe->ppl_id;
	msg.param_data_size = module_config_size;
	msg.core_id = mconfig->core_id;
	msg.domain = mconfig->domain;

	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
		kfree(param_data);
		return ret;
	}
	mconfig->m_state = SKL_MODULE_INIT_DONE;
	kfree(param_data);
	return ret;
}
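
/* Debug helper: log the ids and states of the modules being (un)bound. */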
static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
	*src_module, struct skl_module_cfg *dst_module)
{
	dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
		__func__, src_module->id.module_id, src_module->id.pvt_id);
	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
		 dst_module->id.module_id, dst_module->id.pvt_id);

	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
		src_module->m_state, dst_module->m_state);
}

/*
 * On module freeup, we need to unbind the module from the modules it is
 * already bound to.
 * Find the allocated pins and unbind them using the bind_unbind IPC.
 */
int skl_unbind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret;
	struct skl_ipc_bind_unbind_msg msg;
	struct skl_module_inst_id src_id = src_mcfg->id;
	struct skl_module_inst_id dst_id = dst_mcfg->id;
	int in_max = dst_mcfg->max_in_queue;
	int out_max = src_mcfg->max_out_queue;
	int src_index, dst_index, src_pin_state, dst_pin_state;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	/* get src queue index */
	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
	if (src_index < 0)
		return 0;

	msg.src_queue = src_index;

	/* get dst queue index */
	dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
	if (dst_index < 0)
		return 0;

	msg.dst_queue = dst_index;

	src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
	dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;

	if (src_pin_state != SKL_PIN_BIND_DONE ||
		dst_pin_state != SKL_PIN_BIND_DONE)
		return 0;

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = false;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
	if (!ret) {
		/* free queue only if unbind is successful */
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		skl_free_queue(dst_mcfg->m_in_pin, dst_index);

		/*
		 * Check only the src module bind state, since bind is
		 * always from src -> sink.
		 */
		skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
	}

	return ret;
}

/*
 * Once a module is instantiated, it needs to be 'bound' with the other
 * modules in the pipeline. For binding we need to find the module pins that
 * are bound together.
 * This function finds the pins and then sends the bind_unbind IPC message to
 * the DSP using the IPC helper.
 */
int skl_bind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret;
	struct skl_ipc_bind_unbind_msg msg;
	int in_max = dst_mcfg->max_in_queue;
	int out_max = src_mcfg->max_out_queue;
	int src_index, dst_index;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
		return 0;

	src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
	if (src_index < 0)
		return -EINVAL;

	msg.src_queue = src_index;
	dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
	if (dst_index < 0) {
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		return -EINVAL;
	}

	msg.dst_queue = dst_index;

	dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
			 msg.src_queue, msg.dst_queue);

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = true;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);

	if (!ret) {
		src_mcfg->m_state = SKL_MODULE_BIND_DONE;
		src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
		dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
	} else {
		/* error case: if the IPC fails, clear the queue indices */
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		skl_free_queue(dst_mcfg->m_in_pin, dst_index);
	}

	return ret;
}
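
/* Helper to move a pipeline to the requested firmware state. */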
static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
	enum skl_ipc_pipeline_state state)
{
	dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);

	return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
}

/*
 * A pipeline is a collection of modules. Before a module is instantiated, a
 * pipeline needs to be created for it.
 * This function creates the pipeline by sending the create pipeline IPC
 * message to the FW.
 */
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);

	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
				pipe->pipe_priority, pipe->ppl_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to create pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_CREATED;

	return 0;
}

/*
 * A pipeline needs to be deleted on cleanup. If a pipeline is running, pause
 * it first and then delete it.
 * The pipe delete is done by sending the delete pipeline IPC; the DSP will
 * stop the DMA engines and release the resources.
 */
int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If pipe is started, do stop the pipe in FW. */
	if (pipe->state > SKL_PIPE_STARTED) {
		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to stop pipeline\n");
			return ret;
		}

		pipe->state = SKL_PIPE_PAUSED;
	}

	/* If pipe was not created in FW, do not try to delete it */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to delete pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_INVALID;

	return ret;
}

/*
 * A pipeline is also a scheduling entity in the DSP which can be run or
 * stopped. To process data, the pipe needs to be run by sending the set pipe
 * state IPC to the DSP.
 */
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	/* Pipe has to be paused before it is started */
	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to pause pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to start pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_STARTED;

	return 0;
}

/*
 * Stop the pipeline by sending the set pipe state IPC. The DSP doesn't
 * implement stop, so we always send the pause message.
 */
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to stop pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	return 0;
}

/*
 * Reset the pipeline by sending the set pipe state IPC; this will reset the
 * DMA from the DSP side.
 */
int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
		return ret;
	}

	pipe->state = SKL_PIPE_RESET;

	return 0;
}

/* Algo parameter set helper function */
int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
				u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
}
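
/* Algo parameter get helper function */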
int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
			  u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
}