/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

static void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 i;

	/* copy one 32 bit word at a time as 64 bit access is not supported */
	for (i = 0; i < bytes; i += 4)
		memcpy_toio(dest + i, src + i, 4);
}

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32));
	if (err < 0) {
		kfree(sst_fw);
		return NULL;
	}

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	dma_free_coherent(dsp->dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
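
/*
 * Illustrative call sequence (a sketch, not part of this driver; the
 * firmware file name is an assumption):
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *	int ret;
 *
 *	ret = request_firmware(&fw, "intel-sst-fw.bin", dsp->dev);
 *	if (ret < 0)
 *		return ret;
 *
 *	sst_fw = sst_fw_new(dsp, fw, NULL);
 *	release_firmware(fw);
 *	if (sst_fw == NULL)
 *		return -ENOMEM;
 *
 * sst_fw_new() copies fw->data into its own DMA buffer, so the firmware
 * request can be released as soon as it returns.
 */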

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
		list_del(&sst_fw->list);
		dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;

	memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
	memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));

	INIT_LIST_HEAD(&sst_module->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);
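
/*
 * Sketch of how a core-specific parse_fw() callback might instantiate a
 * module from a firmware header (the "hdr" layout and field names below
 * are hypothetical, not a real SST firmware format):
 *
 *	struct sst_module_template template;
 *	struct sst_module *module;
 *
 *	memset(&template, 0, sizeof(template));
 *	template.id = hdr->module_id;
 *	template.p.size = hdr->persistent_size;
 *	template.p.type = SST_MEM_DRAM;
 *	template.s.size = hdr->scratch_size;
 *	template.s.type = SST_MEM_DRAM;
 *
 *	module = sst_module_new(sst_fw, &template, NULL);
 *	if (module == NULL)
 *		return -ENOMEM;
 */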

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
	u32 offset)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == type && block->offset == offset)
			return block;
	}

	return NULL;
}

/* claim a run of contiguous free blocks for module data - callers hold locks */
static int block_alloc_contiguous(struct sst_module *module,
	struct sst_module_data *data, u32 offset, int size)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block;

	while (size > 0) {
		block = find_block(dsp, data->type, offset);
		if (!block) {
			/* hole in the span - return claimed blocks to free list */
			list_splice(&tmp, &dsp->free_block_list);
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		offset += block->size;
		size -= block->size;
	}

	/* attach each block to the module so it is released with the module */
	list_for_each_entry(block, &tmp, list)
		list_add(&block->module_list, &module->block_list);

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}
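
/*
 * Worked example, assuming 0x1000 byte blocks registered at offsets
 * 0x1000, 0x2000 and 0x3000 on the free list:
 * block_alloc_contiguous(module, data, 0x1000, 0x1800) claims the blocks
 * at 0x1000 and 0x2000 (0x1800 bytes round up to two whole blocks) and
 * moves both to the used list. If any offset in the span has no free
 * block, find_block() fails and every block claimed so far is spliced
 * back onto the free list.
 */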

/* allocate free DSP blocks for module data - callers hold locks */
static int block_alloc(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (data->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != data->type)
			continue;

		if (data->size > block->size)
			continue;

		data->offset = block->offset;
		block->data_type = data->data_type;
		/* data->size <= block->size here, so the block is at most full */
		block->bytes_used = data->size;
		list_add(&block->module_list, &module->block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
			module->id, block->type, block->index);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != data->type)
			continue;

		/* does the module span more than one block? */
		if (data->size > block->size) {
			/* claim the whole span, including this first block */
			ret = block_alloc_contiguous(module, data,
				block->offset, data->size);
			if (ret == 0) {
				data->offset = block->offset;
				return ret;
			}
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}
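
/*
 * Allocation strategy illustrated (numbers are hypothetical): for a
 * 0x1800 byte section with 0x1000 byte blocks, the first pass finds no
 * single block big enough, so the second pass takes the first free block
 * of the right type (say at 0x4000) and asks block_alloc_contiguous()
 * for the whole 0x1800 byte span starting there, claiming the blocks at
 * 0x4000 and 0x5000.
 */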

/* remove module from memory - callers hold locks */
static void block_module_remove(struct sst_module *module)
{
	struct sst_mem_block *block, *tmp;
	struct sst_dsp *dsp = module->dsp;
	int err;

	/* disable each block */
	list_for_each_entry(block, &module->block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_module_prepare(struct sst_module *module)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for module P/S data */
	list_for_each_entry(block, &module->block_list, module_list) {

		if (block->ops && block->ops->enable) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(module->dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, &module->block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	u32 end = data->offset + data->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {

		/* force compacting mem blocks of the same data_type */
		if (block->data_type != data->data_type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (data->offset >= block->offset && end <= block_end)
			return 0;

		/* does the section span more than one block? */
		if (data->offset >= block->offset && data->offset < block_end) {
			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size + data->offset - block->offset);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* find block that holds section */
		if (data->offset >= block->offset && end <= block_end) {

			/* add block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}

		/* does the section span more than one block? */
		if (data->offset >= block->offset && data->offset < block_end) {

			/* the remainder past this block, as in the loop above */
			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size + data->offset - block->offset);
			if (err < 0)
				return -ENOMEM;

			/* add this first block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_insert_fixed_block(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	int ret;

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(module, data);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			data->offset, data->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_module_prepare(module);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_module_remove(module);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);
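
/*
 * Typical use from a parse_fw() implementation (a sketch; the section
 * header fields and the SST_DATA_M data type are assumptions):
 *
 *	struct sst_module_data data;
 *
 *	data.type = SST_MEM_IRAM;
 *	data.data_type = SST_DATA_M;
 *	data.offset = section->dest_addr;
 *	data.size = section->size;
 *	data.data = (void *)section + sizeof(*section);
 *
 *	ret = sst_module_insert_fixed_block(module, &data);
 */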

/* Unload entire module from DSP memory */
int sst_block_module_remove(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_module_remove(module);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_block_module_remove);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
	void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);
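
/*
 * A platform driver would typically register every physical RAM block
 * once at probe time, e.g. (block count, size and the my_block_ops name
 * are illustrative only):
 *
 *	for (i = 0; i < 8; i++) {
 *		block = sst_mem_block_register(dsp, i * SZ_4K, SZ_4K,
 *			SST_MEM_IRAM, &my_block_ops, i, NULL);
 *		if (block == NULL)
 *			goto err_unregister_all;
 *	}
 */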

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *sst_module, *scratch;
	struct sst_mem_block *block, *tmp;
	u32 block_size;
	int ret = 0;

	scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
	if (scratch == NULL)
		return NULL;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	list_for_each_entry(sst_module, &dsp->module_list, list) {
		if (scratch->s.size < sst_module->s.size)
			scratch->s.size = sst_module->s.size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
		scratch->s.size);

	/* init scratch module */
	scratch->dsp = dsp;
	scratch->s.type = SST_MEM_DRAM;
	scratch->s.data_type = SST_DATA_S;
	INIT_LIST_HEAD(&scratch->block_list);

	/* check free blocks before looking at used blocks for space */
	if (!list_empty(&dsp->free_block_list))
		block = list_first_entry(&dsp->free_block_list,
			struct sst_mem_block, list);
	else
		block = list_first_entry(&dsp->used_block_list,
			struct sst_mem_block, list);
	block_size = block->size;

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");
	ret = block_alloc(scratch, &scratch->s);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		goto err;
	}

	/* assign the same offset of scratch to each module */
	list_for_each_entry(sst_module, &dsp->module_list, list)
		sst_module->s.offset = scratch->s.offset;

	mutex_unlock(&dsp->mutex);
	return scratch;

err:
	list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
		list_del(&block->module_list);
	mutex_unlock(&dsp->mutex);
	/* don't leak the scratch module itself on failure */
	kfree(scratch);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);
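
/*
 * The scratch allocator sizes the buffer from the module list, so it must
 * run only after all firmware modules have been parsed, e.g. (sketch):
 *
 *	sst_fw = sst_fw_new(dsp, fw, NULL);
 *	...
 *	scratch = sst_mem_block_alloc_scratch(dsp);
 *	if (scratch == NULL)
 *		return -ENOMEM;
 */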

/* free all scratch blocks */
void sst_mem_block_free_scratch(struct sst_dsp *dsp,
	struct sst_module *scratch)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
		list_del(&block->module_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);