/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

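/*
 * Copy data to DSP MMIO space in 32-bit words, packing any trailing
 * bytes (when the length is not a multiple of 4) into a final word.
 */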
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy uses a 32-bit word count, so divide by 4 */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}
}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

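/*
 * Perform a synchronous memcpy through the DMA engine: prepare a
 * memcpy descriptor, submit it, then wait for it to complete.
 */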
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

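/*
 * Register a DesignWare DMA controller whose registers live inside the
 * ADSP MMIO space so its channels can be used for firmware transfers.
 */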
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

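/* only accept channels provided by the DMA device attached to this DSP */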
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

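/*
 * Request a memcpy-capable channel from the DSP's DMA device and
 * configure it for 4-byte wide transfers.
 */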
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

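/*
 * Create the DMA context for this DSP: describe the DMA engine's MMIO
 * region inside the LPE space and register the matching engine driver.
 */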
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	const char *dma_dev_name;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		dma_dev_name = "dw_dmac";
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);

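/* re-run the core specific parser to load the cached FW image back into the DSP */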
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free against the same device the buffer was allocated on */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

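/* find a free block matching the requested type and offset exactly */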
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary - claim consecutive free
 * blocks until the whole request is covered, putting everything back
 * on the free list on failure.
 */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		/* ba->size <= block->size here, so the block holds the whole request */
		block->bytes_used = ba->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* do we span > 1 blocks */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module ? */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does block span more than 1 section ? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does block span more than 1 section ? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

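/*
 * Allocate persistent memory for a module runtime instance, either at
 * a caller supplied fixed offset or wherever blocks are free.
 */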
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* release the runtime's blocks, not the parent module's */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

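/*
 * Save a runtime's persistent memory to a coherent host buffer so it
 * can be restored after the DSP memory contents are lost.
 */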
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
				context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at a fixed offset ? */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same scratch offset to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);

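/*
 * Create and initialise a DSP instance: set up locks and block/module
 * lists, run the core specific init, then hook up the IRQ and DMA.
 */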
struct sst_dsp *sst_dsp_new(struct device *dev,
	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
{
	struct sst_dsp *sst;
	int err;

	dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->dma_dev = pdata->dma_dev;
	sst->thread_context = sst_dev->thread_context;
	sst->sst_dev = sst_dev;
	sst->id = pdata->id;
	sst->irq = pdata->irq;
	sst->ops = sst_dev->ops;
	sst->pdata = pdata;
	INIT_LIST_HEAD(&sst->used_block_list);
	INIT_LIST_HEAD(&sst->free_block_list);
	INIT_LIST_HEAD(&sst->module_list);
	INIT_LIST_HEAD(&sst->fw_list);
	INIT_LIST_HEAD(&sst->scratch_block_list);

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		err = sst->ops->init(sst, pdata);
		if (err < 0)
			return NULL;
	}

	/* Register the ISR */
	err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (err)
		goto irq_err;

	err = sst_dma_new(sst);
	if (err)
		dev_warn(dev, "sst_dma_new failed %d\n", err);

	return sst;

irq_err:
	if (sst->ops->free)
		sst->ops->free(sst);

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_new);

void sst_dsp_free(struct sst_dsp *sst)
{
	free_irq(sst->irq, sst);
	if (sst->ops->free)
		sst->ops->free(sst);

	sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);

MODULE_DESCRIPTION("Intel SST Firmware Loader");
MODULE_LICENSE("GPL v2");