sst-firmware.c
/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff
#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	/* __iowrite32_copy takes a count of 32-bit words, so divide by 4 */
	__iowrite32_copy((void *)dest, src, bytes / 4);
}
static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}
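
/*
 * Copy between host and DSP memory using the DMA engine. The transfer is
 * synchronous: the descriptor is submitted and then polled to completion
 * with dma_wait_for_async_tx().
 */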
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}
static struct dw_dma_platform_data dw_pdata = {
	.is_private = 1,
	.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
	.chan_priority = CHAN_PRIORITY_ASCENDING,
};
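
/* register a DesignWare DMA controller found inside the ADSP MMIO space */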
static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip, &dw_pdata);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}
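
/* only accept DMA channels belonging to the controller registered above */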
static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
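
/* create the DMA engine device used for firmware download, if one is configured */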
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	const char *dma_dev_name;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		dma_dev_name = "dw_dmac";
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);
void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);
/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
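
/*
 * A minimal usage sketch for the loader above, assuming a core driver that
 * has already created a struct sst_dsp with a parse_fw op; the firmware
 * name and error handling are illustrative only:
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *	int err;
 *
 *	err = request_firmware(&fw, "intel-sst-fw.bin", dsp->dev);
 *	if (err < 0)
 *		return err;
 *
 *	sst_fw = sst_fw_new(dsp, fw, NULL);
 *	release_firmware(fw);	// sst_fw_new() keeps its own DMA-able copy
 *	if (sst_fw == NULL)
 *		return -ENODEV;
 */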
int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);
void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);
/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free on the same device the buffer was allocated on */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);
/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);
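
/* create a runtime instance of a module and add it to the module's runtime list */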
struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);
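
/* find a free block that exactly matches the requested type and offset */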
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* do we span > 1 blocks */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}
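
/* allocate and prepare blocks for generic data - takes and releases the DSP mutex */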
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);
/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}
/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that includes this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);
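
/*
 * Allocate persistent memory for a module runtime instance, either at a
 * fixed offset (offset != 0) or from the first suitable free blocks.
 */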
int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that includes this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	/* free the blocks allocated for this runtime, not the module's */
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
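
/* copy runtime persistent memory to a host-side buffer so it can be restored later */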
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);
int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore!\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
				context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
	void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at fixed offset */
	if (dsp->scratch_offset != 0) {

		/* set the offset before logging so the request is reported accurately */
		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);
/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);
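
/* get a module runtime from its unique ID */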
struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);