sst-firmware.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614
  1. /*
  2. * Intel SST Firmware Loader
  3. *
  4. * Copyright (C) 2013, Intel Corporation. All rights reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License version
  8. * 2 as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/slab.h>
  18. #include <linux/sched.h>
  19. #include <linux/firmware.h>
  20. #include <linux/export.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/dma-mapping.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/pci.h>
  25. #include <asm/page.h>
  26. #include <asm/pgtable.h>
  27. #include "sst-dsp.h"
  28. #include "sst-dsp-priv.h"
  29. static void block_module_remove(struct sst_module *module);
  30. static void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
  31. {
  32. u32 i;
  33. /* copy one 32 bit word at a time as 64 bit access is not supported */
  34. for (i = 0; i < bytes; i += 4)
  35. memcpy_toio(dest + i, src + i, 4);
  36. }
  37. /* create new generic firmware object */
  38. struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
  39. const struct firmware *fw, void *private)
  40. {
  41. struct sst_fw *sst_fw;
  42. int err;
  43. if (!dsp->ops->parse_fw)
  44. return NULL;
  45. sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
  46. if (sst_fw == NULL)
  47. return NULL;
  48. sst_fw->dsp = dsp;
  49. sst_fw->private = private;
  50. sst_fw->size = fw->size;
  51. /* allocate DMA buffer to store FW data */
  52. sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
  53. &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
  54. if (!sst_fw->dma_buf) {
  55. dev_err(dsp->dev, "error: DMA alloc failed\n");
  56. kfree(sst_fw);
  57. return NULL;
  58. }
  59. /* copy FW data to DMA-able memory */
  60. memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
  61. /* call core specific FW paser to load FW data into DSP */
  62. err = dsp->ops->parse_fw(sst_fw);
  63. if (err < 0) {
  64. dev_err(dsp->dev, "error: parse fw failed %d\n", err);
  65. goto parse_err;
  66. }
  67. mutex_lock(&dsp->mutex);
  68. list_add(&sst_fw->list, &dsp->fw_list);
  69. mutex_unlock(&dsp->mutex);
  70. return sst_fw;
  71. parse_err:
  72. dma_free_coherent(dsp->dev, sst_fw->size,
  73. sst_fw->dma_buf,
  74. sst_fw->dmable_fw_paddr);
  75. kfree(sst_fw);
  76. return NULL;
  77. }
  78. EXPORT_SYMBOL_GPL(sst_fw_new);
  79. int sst_fw_reload(struct sst_fw *sst_fw)
  80. {
  81. struct sst_dsp *dsp = sst_fw->dsp;
  82. int ret;
  83. dev_dbg(dsp->dev, "reloading firmware\n");
  84. /* call core specific FW paser to load FW data into DSP */
  85. ret = dsp->ops->parse_fw(sst_fw);
  86. if (ret < 0)
  87. dev_err(dsp->dev, "error: parse fw failed %d\n", ret);
  88. return ret;
  89. }
  90. EXPORT_SYMBOL_GPL(sst_fw_reload);
  91. void sst_fw_unload(struct sst_fw *sst_fw)
  92. {
  93. struct sst_dsp *dsp = sst_fw->dsp;
  94. struct sst_module *module, *tmp;
  95. dev_dbg(dsp->dev, "unloading firmware\n");
  96. mutex_lock(&dsp->mutex);
  97. list_for_each_entry_safe(module, tmp, &dsp->module_list, list) {
  98. if (module->sst_fw == sst_fw) {
  99. block_module_remove(module);
  100. list_del(&module->list);
  101. kfree(module);
  102. }
  103. }
  104. mutex_unlock(&dsp->mutex);
  105. }
  106. EXPORT_SYMBOL_GPL(sst_fw_unload);
  107. /* free single firmware object */
  108. void sst_fw_free(struct sst_fw *sst_fw)
  109. {
  110. struct sst_dsp *dsp = sst_fw->dsp;
  111. mutex_lock(&dsp->mutex);
  112. list_del(&sst_fw->list);
  113. mutex_unlock(&dsp->mutex);
  114. dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
  115. sst_fw->dmable_fw_paddr);
  116. kfree(sst_fw);
  117. }
  118. EXPORT_SYMBOL_GPL(sst_fw_free);
  119. /* free all firmware objects */
  120. void sst_fw_free_all(struct sst_dsp *dsp)
  121. {
  122. struct sst_fw *sst_fw, *t;
  123. mutex_lock(&dsp->mutex);
  124. list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
  125. list_del(&sst_fw->list);
  126. dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
  127. sst_fw->dmable_fw_paddr);
  128. kfree(sst_fw);
  129. }
  130. mutex_unlock(&dsp->mutex);
  131. }
  132. EXPORT_SYMBOL_GPL(sst_fw_free_all);
  133. /* create a new SST generic module from FW template */
  134. struct sst_module *sst_module_new(struct sst_fw *sst_fw,
  135. struct sst_module_template *template, void *private)
  136. {
  137. struct sst_dsp *dsp = sst_fw->dsp;
  138. struct sst_module *sst_module;
  139. sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
  140. if (sst_module == NULL)
  141. return NULL;
  142. sst_module->id = template->id;
  143. sst_module->dsp = dsp;
  144. sst_module->sst_fw = sst_fw;
  145. memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
  146. memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));
  147. INIT_LIST_HEAD(&sst_module->block_list);
  148. mutex_lock(&dsp->mutex);
  149. list_add(&sst_module->list, &dsp->module_list);
  150. mutex_unlock(&dsp->mutex);
  151. return sst_module;
  152. }
  153. EXPORT_SYMBOL_GPL(sst_module_new);
  154. /* free firmware module and remove from available list */
  155. void sst_module_free(struct sst_module *sst_module)
  156. {
  157. struct sst_dsp *dsp = sst_module->dsp;
  158. mutex_lock(&dsp->mutex);
  159. list_del(&sst_module->list);
  160. mutex_unlock(&dsp->mutex);
  161. kfree(sst_module);
  162. }
  163. EXPORT_SYMBOL_GPL(sst_module_free);
  164. static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
  165. u32 offset)
  166. {
  167. struct sst_mem_block *block;
  168. list_for_each_entry(block, &dsp->free_block_list, list) {
  169. if (block->type == type && block->offset == offset)
  170. return block;
  171. }
  172. return NULL;
  173. }
/* Claim a run of physically contiguous free blocks starting at @offset and
 * covering @size bytes, attaching them to @module. On failure every block
 * already claimed is returned to the free list. Callers hold dsp->mutex. */
static int block_alloc_contiguous(struct sst_module *module,
	struct sst_module_data *data, u32 offset, int size)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block;

	while (size > 0) {
		/* each step needs a free block starting exactly where the
		 * previous one ended, otherwise the run is not contiguous */
		block = find_block(dsp, data->type, offset);
		if (!block) {
			/* hand the partially collected run back to the
			 * free list before failing */
			list_splice(&tmp, &dsp->free_block_list);
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		offset += block->size;
		size -= block->size;
	}

	/* attach every claimed block to the module ... */
	list_for_each_entry(block, &tmp, list)
		list_add(&block->module_list, &module->block_list);

	/* ... and account them all as used by the DSP */
	list_splice(&tmp, &dsp->used_block_list);

	return 0;
}
  195. /* allocate free DSP blocks for module data - callers hold locks */
  196. static int block_alloc(struct sst_module *module,
  197. struct sst_module_data *data)
  198. {
  199. struct sst_dsp *dsp = module->dsp;
  200. struct sst_mem_block *block, *tmp;
  201. int ret = 0;
  202. if (data->size == 0)
  203. return 0;
  204. /* find first free whole blocks that can hold module */
  205. list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
  206. /* ignore blocks with wrong type */
  207. if (block->type != data->type)
  208. continue;
  209. if (data->size > block->size)
  210. continue;
  211. data->offset = block->offset;
  212. block->data_type = data->data_type;
  213. block->bytes_used = data->size % block->size;
  214. list_add(&block->module_list, &module->block_list);
  215. list_move(&block->list, &dsp->used_block_list);
  216. dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
  217. module->id, block->type, block->index);
  218. return 0;
  219. }
  220. /* then find free multiple blocks that can hold module */
  221. list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
  222. /* ignore blocks with wrong type */
  223. if (block->type != data->type)
  224. continue;
  225. /* do we span > 1 blocks */
  226. if (data->size > block->size) {
  227. ret = block_alloc_contiguous(module, data,
  228. block->offset, data->size);
  229. if (ret == 0)
  230. return ret;
  231. }
  232. }
  233. /* not enough free block space */
  234. return -ENOMEM;
  235. }
  236. /* remove module from memory - callers hold locks */
  237. static void block_module_remove(struct sst_module *module)
  238. {
  239. struct sst_mem_block *block, *tmp;
  240. struct sst_dsp *dsp = module->dsp;
  241. int err;
  242. /* disable each block */
  243. list_for_each_entry(block, &module->block_list, module_list) {
  244. if (block->ops && block->ops->disable) {
  245. err = block->ops->disable(block);
  246. if (err < 0)
  247. dev_err(dsp->dev,
  248. "error: cant disable block %d:%d\n",
  249. block->type, block->index);
  250. }
  251. }
  252. /* mark each block as free */
  253. list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
  254. list_del(&block->module_list);
  255. list_move(&block->list, &dsp->free_block_list);
  256. }
  257. }
  258. /* prepare the memory block to receive data from host - callers hold locks */
  259. static int block_module_prepare(struct sst_module *module)
  260. {
  261. struct sst_mem_block *block;
  262. int ret = 0;
  263. /* enable each block so that's it'e ready for module P/S data */
  264. list_for_each_entry(block, &module->block_list, module_list) {
  265. if (block->ops && block->ops->enable) {
  266. ret = block->ops->enable(block);
  267. if (ret < 0) {
  268. dev_err(module->dsp->dev,
  269. "error: cant disable block %d:%d\n",
  270. block->type, block->index);
  271. goto err;
  272. }
  273. }
  274. }
  275. return ret;
  276. err:
  277. list_for_each_entry(block, &module->block_list, module_list) {
  278. if (block->ops && block->ops->disable)
  279. block->ops->disable(block);
  280. }
  281. return ret;
  282. }
/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	u32 end = data->offset + data->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {

		/* force compacting mem blocks of the same data_type */
		if (block->data_type != data->data_type)
			continue;

		block_end = block->offset + block->size;

		/* an already-owned block fully covers the section - done */
		if (data->offset >= block->offset && end < block_end)
			return 0;

		/* section starts inside this block but runs past its end:
		 * extend the allocation with contiguous blocks after it */
		if (data->offset >= block->offset && data->offset < block_end) {
			/* NOTE(review): "data->size - block->size" assumes the
			 * section starts at block->offset; if it starts mid
			 * block the remaining size looks under-counted -
			 * confirm against block_alloc_contiguous()'s contract */
			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* a single free block fully covers the section */
		if (data->offset >= block->offset && end < block_end) {

			/* add block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}

		/* section starts in this block but spans more than one:
		 * claim a contiguous run starting at the block's offset */
		if (data->offset >= block->offset && data->offset < block_end) {
			err = block_alloc_contiguous(module, data,
				block->offset, data->size);
			if (err < 0)
				return -ENOMEM;
			return 0;
		}
	}

	/* no block (run) covers the requested fixed address range */
	return -ENOMEM;
}
  336. /* Load fixed module data into DSP memory blocks */
  337. int sst_module_insert_fixed_block(struct sst_module *module,
  338. struct sst_module_data *data)
  339. {
  340. struct sst_dsp *dsp = module->dsp;
  341. int ret;
  342. mutex_lock(&dsp->mutex);
  343. /* alloc blocks that includes this section */
  344. ret = block_alloc_fixed(module, data);
  345. if (ret < 0) {
  346. dev_err(dsp->dev,
  347. "error: no free blocks for section at offset 0x%x size 0x%x\n",
  348. data->offset, data->size);
  349. mutex_unlock(&dsp->mutex);
  350. return -ENOMEM;
  351. }
  352. /* prepare DSP blocks for module copy */
  353. ret = block_module_prepare(module);
  354. if (ret < 0) {
  355. dev_err(dsp->dev, "error: fw module prepare failed\n");
  356. goto err;
  357. }
  358. /* copy partial module data to blocks */
  359. sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);
  360. mutex_unlock(&dsp->mutex);
  361. return ret;
  362. err:
  363. block_module_remove(module);
  364. mutex_unlock(&dsp->mutex);
  365. return ret;
  366. }
  367. EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);
  368. /* Unload entire module from DSP memory */
  369. int sst_block_module_remove(struct sst_module *module)
  370. {
  371. struct sst_dsp *dsp = module->dsp;
  372. mutex_lock(&dsp->mutex);
  373. block_module_remove(module);
  374. mutex_unlock(&dsp->mutex);
  375. return 0;
  376. }
  377. EXPORT_SYMBOL_GPL(sst_block_module_remove);
  378. /* register a DSP memory block for use with FW based modules */
  379. struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
  380. u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
  381. void *private)
  382. {
  383. struct sst_mem_block *block;
  384. block = kzalloc(sizeof(*block), GFP_KERNEL);
  385. if (block == NULL)
  386. return NULL;
  387. block->offset = offset;
  388. block->size = size;
  389. block->index = index;
  390. block->type = type;
  391. block->dsp = dsp;
  392. block->private = private;
  393. block->ops = ops;
  394. mutex_lock(&dsp->mutex);
  395. list_add(&block->list, &dsp->free_block_list);
  396. mutex_unlock(&dsp->mutex);
  397. return block;
  398. }
  399. EXPORT_SYMBOL_GPL(sst_mem_block_register);
  400. /* unregister all DSP memory blocks */
  401. void sst_mem_block_unregister_all(struct sst_dsp *dsp)
  402. {
  403. struct sst_mem_block *block, *tmp;
  404. mutex_lock(&dsp->mutex);
  405. /* unregister used blocks */
  406. list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
  407. list_del(&block->list);
  408. kfree(block);
  409. }
  410. /* unregister free blocks */
  411. list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
  412. list_del(&block->list);
  413. kfree(block);
  414. }
  415. mutex_unlock(&dsp->mutex);
  416. }
  417. EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
  418. /* allocate scratch buffer blocks */
  419. struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
  420. {
  421. struct sst_module *sst_module, *scratch;
  422. struct sst_mem_block *block, *tmp;
  423. u32 block_size;
  424. int ret = 0;
  425. scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
  426. if (scratch == NULL)
  427. return NULL;
  428. mutex_lock(&dsp->mutex);
  429. /* calculate required scratch size */
  430. list_for_each_entry(sst_module, &dsp->module_list, list) {
  431. if (scratch->s.size < sst_module->s.size)
  432. scratch->s.size = sst_module->s.size;
  433. }
  434. dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
  435. scratch->s.size);
  436. /* init scratch module */
  437. scratch->dsp = dsp;
  438. scratch->s.type = SST_MEM_DRAM;
  439. scratch->s.data_type = SST_DATA_S;
  440. INIT_LIST_HEAD(&scratch->block_list);
  441. /* check free blocks before looking at used blocks for space */
  442. if (!list_empty(&dsp->free_block_list))
  443. block = list_first_entry(&dsp->free_block_list,
  444. struct sst_mem_block, list);
  445. else
  446. block = list_first_entry(&dsp->used_block_list,
  447. struct sst_mem_block, list);
  448. block_size = block->size;
  449. /* allocate blocks for module scratch buffers */
  450. dev_dbg(dsp->dev, "allocating scratch blocks\n");
  451. ret = block_alloc(scratch, &scratch->s);
  452. if (ret < 0) {
  453. dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
  454. goto err;
  455. }
  456. /* assign the same offset of scratch to each module */
  457. list_for_each_entry(sst_module, &dsp->module_list, list)
  458. sst_module->s.offset = scratch->s.offset;
  459. mutex_unlock(&dsp->mutex);
  460. return scratch;
  461. err:
  462. list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
  463. list_del(&block->module_list);
  464. mutex_unlock(&dsp->mutex);
  465. return NULL;
  466. }
  467. EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);
  468. /* free all scratch blocks */
  469. void sst_mem_block_free_scratch(struct sst_dsp *dsp,
  470. struct sst_module *scratch)
  471. {
  472. struct sst_mem_block *block, *tmp;
  473. mutex_lock(&dsp->mutex);
  474. list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
  475. list_del(&block->module_list);
  476. mutex_unlock(&dsp->mutex);
  477. }
  478. EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);
  479. /* get a module from it's unique ID */
  480. struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
  481. {
  482. struct sst_module *module;
  483. mutex_lock(&dsp->mutex);
  484. list_for_each_entry(module, &dsp->module_list, list) {
  485. if (module->id == id) {
  486. mutex_unlock(&dsp->mutex);
  487. return module;
  488. }
  489. }
  490. mutex_unlock(&dsp->mutex);
  491. return NULL;
  492. }
  493. EXPORT_SYMBOL_GPL(sst_module_get_from_id);