/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy() takes a count of 32-bit words, so divide by 4 */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}
}
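
/*
 * Illustrative only: for a 10-byte copy, sst_memcpy32() above writes two
 * whole 32-bit words via __iowrite32_copy(), then packs the remaining
 * 2 bytes into the low bits of a zero-padded word and writes that as a
 * third 32-bit access, since __iowrite32_copy() only issues 32-bit MMIO
 * writes.
 */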
static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
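
/*
 * Illustrative only: the DSP-side address in either direction is tagged
 * with SST_HSW_MASK_DMA_ADDR_DSP so the engine targets DSP memory rather
 * than host RAM. A typical host-to-DSP transfer, assuming a held channel
 * (see sst_dsp_dma_get_channel() below) and hypothetical dsp_offset and
 * host_dma_addr values, looks like:
 *
 *	ret = sst_dsp_dma_copyto(sst, dsp->addr.lpe_base + dsp_offset,
 *				 host_dma_addr, size);
 */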
/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
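
/*
 * Illustrative only: callers bracket transfers with channel get/put, e.g.
 *
 *	ret = sst_dsp_dma_get_channel(dsp, 0);
 *	if (ret < 0)
 *		return ret;
 *	ret = sst_dsp_dma_copyto(dsp, dest, src, size);
 *	sst_dsp_dma_put_channel(dsp);
 *
 * dma_chan_filter() restricts the request to channels belonging to the
 * DMA device attached to this ADSP, so an unrelated engine is never used.
 */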
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);
/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
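
/*
 * Illustrative only: a platform driver would typically pair this with
 * request_firmware(), assuming a hypothetical image name "sst-fw.bin":
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *
 *	err = request_firmware(&fw, "sst-fw.bin", dsp->dev);
 *	if (err < 0)
 *		return err;
 *	sst_fw = sst_fw_new(dsp, fw, NULL);
 *	release_firmware(fw);
 *	if (sst_fw == NULL)
 *		return -ENOMEM;
 *
 * The raw image can be released immediately because sst_fw_new() keeps
 * its own DMA-able copy of the data.
 */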
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);
void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {
			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {
				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
		list_del(&sst_fw->list);
		/* free on dma_dev, the device the buffer was allocated on */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);
/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);
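
/*
 * Illustrative only: a core-specific parse_fw() callback would fill a
 * template for each module it finds in the image and register it here.
 * mod_id, entry_point, scratch and persistent are hypothetical values
 * decoded from the firmware file:
 *
 *	struct sst_module_template tmpl = {
 *		.id			= mod_id,
 *		.entry			= entry_point,
 *		.scratch_size		= scratch,
 *		.persistent_size	= persistent,
 *	};
 *	module = sst_module_new(sst_fw, &tmpl, NULL);
 */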
/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}
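
/*
 * Illustrative only: with (hypothetical) 0x1000-byte DRAM blocks, a
 * request for 0x2800 bytes at offset 0x5000 walks the free list via
 * find_block() three times, claiming the blocks at offsets 0x5000,
 * 0x6000 and 0x7000. If any of those offsets is not free, the partial
 * set is spliced back onto the free list and -ENOMEM is returned.
 */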
/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole block that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* do we span more than one block? */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);
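
/*
 * Illustrative only: a caller needing a chunk of DSP DRAM would do
 * something like the following, pairing the alloc with sst_free_blocks()
 * on the same list when done (the 0x800 size is hypothetical):
 *
 *	struct sst_block_allocator ba = {
 *		.size = 0x800,
 *		.type = SST_MEM_DRAM,
 *	};
 *	LIST_HEAD(blocks);
 *
 *	ret = sst_alloc_blocks(dsp, &ba, &blocks);
 *	...
 *	sst_free_blocks(dsp, &blocks);
 */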
int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module? */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does the section span more than one block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does the section span more than one block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}
/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* release the runtime's blocks, not the parent module's */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore!\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
				context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
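
/*
 * Illustrative only: a driver's power management path would pair these,
 * with the context buffer allocated by save and released by restore:
 *
 *	suspend:	sst_module_runtime_save(runtime, &context);
 *	resume:		sst_module_runtime_restore(runtime, &context);
 */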
/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);
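
/*
 * Illustrative only: platform code would register each physical block at
 * probe time, e.g. carving DRAM into equal blocks (sizes and block_ops
 * hypothetical):
 *
 *	for (i = 0; i < nblocks; i++)
 *		sst_mem_block_register(dsp, i * 0x1000, 0x1000,
 *				       SST_MEM_DRAM, &block_ops, i, NULL);
 *
 * Newly registered blocks start life on the free list, ready for the
 * allocators above.
 */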
/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at fixed offset? */
	if (dsp->scratch_offset != 0) {
		/* set the fixed offset before logging and allocating */
		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
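
/*
 * Illustrative only: the scratch region is sized to the largest single
 * request, not the sum, because modules share it. With modules asking
 * for 0x100 and 0x400 bytes, one 0x400-byte scratch area is allocated
 * and the same dsp->scratch_offset is handed to every module.
 */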
/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);
struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
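
/*
 * Illustrative only, with hypothetical numbers: if IRAM starts at host
 * offset 0x80000 (addr.iram_offset) and at 0xc0000 in the DSP's own map
 * (addr.dsp_iram_offset), a block at host offset 0x81000 translates to
 *
 *	0x81000 - 0x80000 + 0xc0000 = 0xc1000
 *
 * in DSP address space.
 */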
struct sst_dsp *sst_dsp_new(struct device *dev,
	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
{
	struct sst_dsp *sst;
	int err;

	dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->dma_dev = pdata->dma_dev;
	sst->thread_context = sst_dev->thread_context;
	sst->sst_dev = sst_dev;
	sst->id = pdata->id;
	sst->irq = pdata->irq;
	sst->ops = sst_dev->ops;
	sst->pdata = pdata;
	INIT_LIST_HEAD(&sst->used_block_list);
	INIT_LIST_HEAD(&sst->free_block_list);
	INIT_LIST_HEAD(&sst->module_list);
	INIT_LIST_HEAD(&sst->fw_list);
	INIT_LIST_HEAD(&sst->scratch_block_list);

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		err = sst->ops->init(sst, pdata);
		if (err < 0)
			return NULL;
	}

	/* Register the ISR */
	err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (err)
		goto irq_err;

	err = sst_dma_new(sst);
	if (err)
		dev_warn(dev, "sst_dma_new failed %d\n", err);

	return sst;

irq_err:
	if (sst->ops->free)
		sst->ops->free(sst);

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_new);

void sst_dsp_free(struct sst_dsp *sst)
{
	free_irq(sst->irq, sst);
	if (sst->ops->free)
		sst->ops->free(sst);

	sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);
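
/*
 * Illustrative only: a platform driver's probe/remove would bracket the
 * DSP lifetime with this pair, where my_sst_dev and my_pdata are
 * hypothetical driver-provided descriptors:
 *
 *	sst = sst_dsp_new(&pdev->dev, &my_sst_dev, &my_pdata);
 *	if (sst == NULL)
 *		return -ENODEV;
 *	...
 *	sst_dsp_free(sst);
 */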
MODULE_DESCRIPTION("Intel SST Firmware Loader");
MODULE_LICENSE("GPL v2");