/* drivers/crypto/marvell/cesa.c */
  1. /*
  2. * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
  3. * that can be found on the following platform: Orion, Kirkwood, Armada. This
  4. * driver supports the TDMA engine on platforms on which it is available.
  5. *
  6. * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
  7. * Author: Arnaud Ebalard <arno@natisbad.org>
  8. *
  9. * This work is based on an initial version written by
  10. * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
  11. *
  12. * This program is free software; you can redistribute it and/or modify it
  13. * under the terms of the GNU General Public License version 2 as published
  14. * by the Free Software Foundation.
  15. */
  16. #include <linux/delay.h>
  17. #include <linux/genalloc.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/io.h>
  20. #include <linux/kthread.h>
  21. #include <linux/mbus.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/scatterlist.h>
  24. #include <linux/slab.h>
  25. #include <linux/module.h>
  26. #include <linux/clk.h>
  27. #include <linux/of.h>
  28. #include <linux/of_platform.h>
  29. #include <linux/of_irq.h>
  30. #include "cesa.h"
  31. static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
  32. module_param_named(allhwsupport, allhwsupport, int, 0444);
  33. MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)");
  34. struct mv_cesa_dev *cesa_dev;
  35. static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
  36. {
  37. struct crypto_async_request *req, *backlog;
  38. struct mv_cesa_ctx *ctx;
  39. spin_lock_bh(&cesa_dev->lock);
  40. backlog = crypto_get_backlog(&cesa_dev->queue);
  41. req = crypto_dequeue_request(&cesa_dev->queue);
  42. engine->req = req;
  43. spin_unlock_bh(&cesa_dev->lock);
  44. if (!req)
  45. return;
  46. if (backlog)
  47. backlog->complete(backlog, -EINPROGRESS);
  48. ctx = crypto_tfm_ctx(req->tfm);
  49. ctx->ops->prepare(req, engine);
  50. ctx->ops->step(req);
  51. }
/*
 * Engine interrupt handler (runs threaded, see devm_request_threaded_irq
 * in probe): acknowledges pending interrupts, advances the request the
 * engine currently owns and, when it completes, immediately chains in the
 * next queued request before signalling completion.
 */
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		/* Only react to interrupt sources we actually unmasked. */
		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);
		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		ret = IRQ_HANDLED;
		/* Snapshot the in-flight request under the engine lock. */
		spin_lock_bh(&engine->lock);
		req = engine->req;
		spin_unlock_bh(&engine->lock);
		if (req) {
			ctx = crypto_tfm_ctx(req->tfm);
			res = ctx->ops->process(req, status & mask);
			if (res != -EINPROGRESS) {
				/*
				 * Request finished: free the engine slot and
				 * start the next queued request before
				 * completing this one.
				 */
				spin_lock_bh(&engine->lock);
				engine->req = NULL;
				mv_cesa_dequeue_req_unlocked(engine);
				spin_unlock_bh(&engine->lock);
				ctx->ops->cleanup(req);
				/* Crypto completions run in BH context. */
				local_bh_disable();
				req->complete(req, res);
				local_bh_enable();
			} else {
				/* More work pending: kick the next step. */
				ctx->ops->step(req);
			}
		}
	}

	return ret;
}
  94. int mv_cesa_queue_req(struct crypto_async_request *req)
  95. {
  96. int ret;
  97. int i;
  98. spin_lock_bh(&cesa_dev->lock);
  99. ret = crypto_enqueue_request(&cesa_dev->queue, req);
  100. spin_unlock_bh(&cesa_dev->lock);
  101. if (ret != -EINPROGRESS)
  102. return ret;
  103. for (i = 0; i < cesa_dev->caps->nengines; i++) {
  104. spin_lock_bh(&cesa_dev->engines[i].lock);
  105. if (!cesa_dev->engines[i].req)
  106. mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]);
  107. spin_unlock_bh(&cesa_dev->engines[i].lock);
  108. }
  109. return -EINPROGRESS;
  110. }
  111. static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
  112. {
  113. int ret;
  114. int i, j;
  115. for (i = 0; i < cesa->caps->ncipher_algs; i++) {
  116. ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
  117. if (ret)
  118. goto err_unregister_crypto;
  119. }
  120. for (i = 0; i < cesa->caps->nahash_algs; i++) {
  121. ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
  122. if (ret)
  123. goto err_unregister_ahash;
  124. }
  125. return 0;
  126. err_unregister_ahash:
  127. for (j = 0; j < i; j++)
  128. crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
  129. i = cesa->caps->ncipher_algs;
  130. err_unregister_crypto:
  131. for (j = 0; j < i; j++)
  132. crypto_unregister_alg(cesa->caps->cipher_algs[j]);
  133. return ret;
  134. }
  135. static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
  136. {
  137. int i;
  138. for (i = 0; i < cesa->caps->nahash_algs; i++)
  139. crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
  140. for (i = 0; i < cesa->caps->ncipher_algs; i++)
  141. crypto_unregister_alg(cesa->caps->cipher_algs[i]);
  142. }
/* Cipher algorithms handled by the Orion-generation engine. */
static struct crypto_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

/* Hash algorithms handled by the Orion-generation engine. */
static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};
/* Cipher algorithms handled by Armada-generation engines. */
static struct crypto_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

/* Hash algorithms handled by Armada-generation engines (adds SHA-256). */
static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};
/* Orion: single engine, no TDMA support. */
static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

/* Kirkwood/Dove: same algorithms as Orion, but with a TDMA engine. */
static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

/* Armada 370: one engine, TDMA, extended hash list (SHA-256). */
static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

/* Armada XP/375/38x: as Armada 370 but with two engines. */
static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};
/* Device-tree compatibles, each mapped to its capability set. */
static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
  216. static void
  217. mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
  218. const struct mbus_dram_target_info *dram)
  219. {
  220. void __iomem *iobase = engine->regs;
  221. int i;
  222. for (i = 0; i < 4; i++) {
  223. writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
  224. writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
  225. }
  226. for (i = 0; i < dram->num_cs; i++) {
  227. const struct mbus_dram_window *cs = dram->cs + i;
  228. writel(((cs->size - 1) & 0xffff0000) |
  229. (cs->mbus_attr << 8) |
  230. (dram->mbus_dram_target_id << 4) | 1,
  231. iobase + CESA_TDMA_WINDOW_CTRL(i));
  232. writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
  233. }
  234. }
  235. static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
  236. {
  237. struct device *dev = cesa->dev;
  238. struct mv_cesa_dev_dma *dma;
  239. if (!cesa->caps->has_tdma)
  240. return 0;
  241. dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
  242. if (!dma)
  243. return -ENOMEM;
  244. dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
  245. sizeof(struct mv_cesa_tdma_desc),
  246. 16, 0);
  247. if (!dma->tdma_desc_pool)
  248. return -ENOMEM;
  249. dma->op_pool = dmam_pool_create("cesa_op", dev,
  250. sizeof(struct mv_cesa_op_ctx), 16, 0);
  251. if (!dma->op_pool)
  252. return -ENOMEM;
  253. dma->cache_pool = dmam_pool_create("cesa_cache", dev,
  254. CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
  255. if (!dma->cache_pool)
  256. return -ENOMEM;
  257. dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
  258. if (!dma->padding_pool)
  259. return -ENOMEM;
  260. cesa->dma = dma;
  261. return 0;
  262. }
  263. static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
  264. {
  265. struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
  266. struct mv_cesa_engine *engine = &cesa->engines[idx];
  267. const char *res_name = "sram";
  268. struct resource *res;
  269. engine->pool = of_gen_pool_get(cesa->dev->of_node,
  270. "marvell,crypto-srams", idx);
  271. if (engine->pool) {
  272. engine->sram = gen_pool_dma_alloc(engine->pool,
  273. cesa->sram_size,
  274. &engine->sram_dma);
  275. if (engine->sram)
  276. return 0;
  277. engine->pool = NULL;
  278. return -ENOMEM;
  279. }
  280. if (cesa->caps->nengines > 1) {
  281. if (!idx)
  282. res_name = "sram0";
  283. else
  284. res_name = "sram1";
  285. }
  286. res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  287. res_name);
  288. if (!res || resource_size(res) < cesa->sram_size)
  289. return -EINVAL;
  290. engine->sram = devm_ioremap_resource(cesa->dev, res);
  291. if (IS_ERR(engine->sram))
  292. return PTR_ERR(engine->sram);
  293. engine->sram_dma = phys_to_dma(cesa->dev,
  294. (phys_addr_t)res->start);
  295. return 0;
  296. }
  297. static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
  298. {
  299. struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
  300. struct mv_cesa_engine *engine = &cesa->engines[idx];
  301. if (!engine->pool)
  302. return;
  303. gen_pool_free(engine->pool, (unsigned long)engine->sram,
  304. cesa->sram_size);
  305. }
/*
 * Probe: resolve the capability set from the DT compatible (defaulting to
 * Orion for non-DT platforms), map registers, set up DMA pools, then
 * initialize each engine (SRAM, IRQ, clocks, mbus windows) and register
 * the supported crypto algorithms.
 */
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	/* The global cesa_dev pointer only supports one device. */
	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	/* Orion/Kirkwood may be driven by mv_cesa; only take over on request. */
	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
		return -ENOTSUPP;

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	/* Optional DT override of the SRAM size, clamped to the minimum. */
	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);
	crypto_init_queue(&cesa->queue, 50);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			/* Fall back to an unnamed clock, then to none. */
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(&cesa->engines[i], dram);

		/* Clear stale interrupts and stop the engine on digest error. */
		writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       cesa->engines[i].regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       cesa->engines[i].regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						&cesa->engines[i]);
		if (ret)
			goto err_cleanup;
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	/*
	 * NOTE(review): this loop reuses 'i' and walks ALL engines, including
	 * ones whose clocks were never prepared/enabled (clk fields are NULL
	 * for untouched engines, which clk_disable_unprepare() tolerates, but
	 * an engine that failed inside clk_prepare_enable() gets an unbalanced
	 * disable) — confirm against the clk API expectations.
	 */
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}
  420. static int mv_cesa_remove(struct platform_device *pdev)
  421. {
  422. struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
  423. int i;
  424. mv_cesa_remove_algs(cesa);
  425. for (i = 0; i < cesa->caps->nengines; i++) {
  426. clk_disable_unprepare(cesa->engines[i].zclk);
  427. clk_disable_unprepare(cesa->engines[i].clk);
  428. mv_cesa_put_sram(pdev, i);
  429. }
  430. return 0;
  431. }
/* Platform driver glue: bound via DT match table or the platform name. */
static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");