cesa.c

/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");

struct mv_cesa_dev *cesa_dev;
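
/*
 * Dequeue the next pending request from the engine queue and report the
 * backlogged request (if any) through @backlog. Must be called with the
 * engine lock held.
 */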
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
                           struct crypto_async_request **backlog)
{
        struct crypto_async_request *req;

        *backlog = crypto_get_backlog(&engine->queue);
        req = crypto_dequeue_request(&engine->queue);
        if (!req)
                return NULL;

        return req;
}
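
/*
 * If the engine is idle, grab the next pending request, notify the
 * backlogged one that it is now being processed, and launch the first
 * step of the new request.
 */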
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
        struct crypto_async_request *req = NULL, *backlog = NULL;
        struct mv_cesa_ctx *ctx;

        spin_lock_bh(&engine->lock);
        if (!engine->req) {
                req = mv_cesa_dequeue_req_locked(engine, &backlog);
                engine->req = req;
        }
        spin_unlock_bh(&engine->lock);

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(req->tfm);
        ctx->ops->step(req);
}
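
/*
 * Standard (non-TDMA) processing: let the request-specific handler look
 * at the engine status, then either complete the request or start its
 * next step.
 */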
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
        struct crypto_async_request *req;
        struct mv_cesa_ctx *ctx;
        int res;

        req = engine->req;
        ctx = crypto_tfm_ctx(req->tfm);
        res = ctx->ops->process(req, status);

        if (res == 0) {
                ctx->ops->complete(req);
                mv_cesa_engine_enqueue_complete_request(engine, req);
        } else if (res == -EINPROGRESS) {
                ctx->ops->step(req);
        }

        return res;
}
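
/*
 * Dispatch to the TDMA handler when a descriptor chain is active,
 * otherwise fall back to standard processing.
 */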
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
        if (engine->chain.first && engine->chain.last)
                return mv_cesa_tdma_process(engine, status);

        return mv_cesa_std_process(engine, status);
}
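
/*
 * Clean up a finished request and run its completion callback. Bottom
 * halves are disabled around the callback so that it runs in a
 * softirq-safe context, as crypto API users expect.
 */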
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
                     int res)
{
        ctx->ops->cleanup(req);
        local_bh_disable();
        req->complete(req, res);
        local_bh_enable();
}
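
/*
 * Threaded interrupt handler: acknowledge the engine interrupt, process
 * the current request, launch the next pending one, then complete every
 * request sitting on the completion queue.
 */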
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
        struct mv_cesa_engine *engine = priv;
        struct crypto_async_request *req;
        struct mv_cesa_ctx *ctx;
        u32 status, mask;
        irqreturn_t ret = IRQ_NONE;

        while (true) {
                int res;

                mask = mv_cesa_get_int_mask(engine);
                status = readl(engine->regs + CESA_SA_INT_STATUS);
                if (!(status & mask))
                        break;

                /*
                 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
                 * relevant on some platforms.
                 */
                writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
                writel(~status, engine->regs + CESA_SA_INT_STATUS);

                /* Process fetched requests */
                res = mv_cesa_int_process(engine, status & mask);
                ret = IRQ_HANDLED;

                spin_lock_bh(&engine->lock);
                req = engine->req;
                if (res != -EINPROGRESS)
                        engine->req = NULL;
                spin_unlock_bh(&engine->lock);

                ctx = crypto_tfm_ctx(req->tfm);

                if (res && res != -EINPROGRESS)
                        mv_cesa_complete_req(ctx, req, res);

                /* Launch the next pending request */
                mv_cesa_rearm_engine(engine);

                /* Iterate over the complete queue */
                while (true) {
                        req = mv_cesa_engine_dequeue_complete_request(engine);
                        if (!req)
                                break;

                        ctx = crypto_tfm_ctx(req->tfm);
                        mv_cesa_complete_req(ctx, req, 0);
                }
        }

        return ret;
}
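
/*
 * Enqueue a request on the engine queue. Accepted DMA requests are
 * chained on the engine's TDMA chain before the engine is (re)armed.
 */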
int mv_cesa_queue_req(struct crypto_async_request *req,
                      struct mv_cesa_req *creq)
{
        int ret;
        struct mv_cesa_engine *engine = creq->engine;

        spin_lock_bh(&engine->lock);
        ret = crypto_enqueue_request(&engine->queue, req);
        if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
            (ret == -EINPROGRESS ||
             (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
                mv_cesa_tdma_chain(engine, creq);
        spin_unlock_bh(&engine->lock);

        if (ret != -EINPROGRESS)
                return ret;

        mv_cesa_rearm_engine(engine);

        return -EINPROGRESS;
}
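
/*
 * Register every cipher and hash algorithm supported by this variant,
 * unregistering anything already registered if one of them fails.
 */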
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
        int ret;
        int i, j;

        for (i = 0; i < cesa->caps->ncipher_algs; i++) {
                ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
                if (ret)
                        goto err_unregister_crypto;
        }

        for (i = 0; i < cesa->caps->nahash_algs; i++) {
                ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
                if (ret)
                        goto err_unregister_ahash;
        }

        return 0;

err_unregister_ahash:
        for (j = 0; j < i; j++)
                crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
        i = cesa->caps->ncipher_algs;

err_unregister_crypto:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(cesa->caps->cipher_algs[j]);

        return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
        int i;

        for (i = 0; i < cesa->caps->nahash_algs; i++)
                crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

        for (i = 0; i < cesa->caps->ncipher_algs; i++)
                crypto_unregister_alg(cesa->caps->cipher_algs[i]);
}
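
/* Algorithm sets exposed by each supported SoC family */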
static struct crypto_alg *orion_cipher_algs[] = {
        &mv_cesa_ecb_des_alg,
        &mv_cesa_cbc_des_alg,
        &mv_cesa_ecb_des3_ede_alg,
        &mv_cesa_cbc_des3_ede_alg,
        &mv_cesa_ecb_aes_alg,
        &mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
        &mv_md5_alg,
        &mv_sha1_alg,
        &mv_ahmac_md5_alg,
        &mv_ahmac_sha1_alg,
};

static struct crypto_alg *armada_370_cipher_algs[] = {
        &mv_cesa_ecb_des_alg,
        &mv_cesa_cbc_des_alg,
        &mv_cesa_ecb_des3_ede_alg,
        &mv_cesa_cbc_des3_ede_alg,
        &mv_cesa_ecb_aes_alg,
        &mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
        &mv_md5_alg,
        &mv_sha1_alg,
        &mv_sha256_alg,
        &mv_ahmac_md5_alg,
        &mv_ahmac_sha1_alg,
        &mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
        .nengines = 1,
        .cipher_algs = orion_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
        .ahash_algs = orion_ahash_algs,
        .nahash_algs = ARRAY_SIZE(orion_ahash_algs),
        .has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
        .nengines = 1,
        .cipher_algs = orion_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
        .ahash_algs = orion_ahash_algs,
        .nahash_algs = ARRAY_SIZE(orion_ahash_algs),
        .has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
        .nengines = 1,
        .cipher_algs = armada_370_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
        .ahash_algs = armada_370_ahash_algs,
        .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
        .has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
        .nengines = 2,
        .cipher_algs = armada_370_cipher_algs,
        .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
        .ahash_algs = armada_370_ahash_algs,
        .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
        .has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
        { .compatible = "marvell,orion-crypto", .data = &orion_caps },
        { .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
        { .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
        { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
        { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
        { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
        { .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
        {}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
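
/*
 * Program the TDMA address decoding windows so the engine can reach
 * every DRAM chip-select described by the mbus layout.
 */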
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
                          const struct mbus_dram_target_info *dram)
{
        void __iomem *iobase = engine->regs;
        int i;

        for (i = 0; i < 4; i++) {
                writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
                writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
        }

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel(((cs->size - 1) & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       (dram->mbus_dram_target_id << 4) | 1,
                       iobase + CESA_TDMA_WINDOW_CTRL(i));
                writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
        }
}
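
/*
 * Create the DMA pools (TDMA descriptors, operation contexts, hash
 * cache, padding and IV buffers) used by the TDMA engine. A no-op on
 * variants without TDMA support.
 */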
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
        struct device *dev = cesa->dev;
        struct mv_cesa_dev_dma *dma;

        if (!cesa->caps->has_tdma)
                return 0;

        dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
                                               sizeof(struct mv_cesa_tdma_desc),
                                               16, 0);
        if (!dma->tdma_desc_pool)
                return -ENOMEM;

        dma->op_pool = dmam_pool_create("cesa_op", dev,
                                        sizeof(struct mv_cesa_op_ctx), 16, 0);
        if (!dma->op_pool)
                return -ENOMEM;

        dma->cache_pool = dmam_pool_create("cesa_cache", dev,
                                           CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
        if (!dma->cache_pool)
                return -ENOMEM;

        dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
        if (!dma->padding_pool)
                return -ENOMEM;

        dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
        if (!dma->iv_pool)
                return -ENOMEM;

        cesa->dma = dma;

        return 0;
}
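
/*
 * Map the SRAM used by one engine: preferably from a
 * "marvell,crypto-srams" genalloc pool, otherwise from a plain
 * "sram"/"sramN" MMIO resource.
 */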
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        struct mv_cesa_engine *engine = &cesa->engines[idx];
        const char *res_name = "sram";
        struct resource *res;

        engine->pool = of_gen_pool_get(cesa->dev->of_node,
                                       "marvell,crypto-srams", idx);
        if (engine->pool) {
                engine->sram = gen_pool_dma_alloc(engine->pool,
                                                  cesa->sram_size,
                                                  &engine->sram_dma);
                if (engine->sram)
                        return 0;

                engine->pool = NULL;
                return -ENOMEM;
        }

        if (cesa->caps->nengines > 1) {
                if (!idx)
                        res_name = "sram0";
                else
                        res_name = "sram1";
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           res_name);
        if (!res || resource_size(res) < cesa->sram_size)
                return -EINVAL;

        engine->sram = devm_ioremap_resource(cesa->dev, res);
        if (IS_ERR(engine->sram))
                return PTR_ERR(engine->sram);

        engine->sram_dma = phys_to_dma(cesa->dev,
                                       (phys_addr_t)res->start);

        return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        struct mv_cesa_engine *engine = &cesa->engines[idx];

        if (!engine->pool)
                return;

        gen_pool_free(engine->pool, (unsigned long)engine->sram,
                      cesa->sram_size);
}
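
/*
 * Probe: identify the SoC variant, map registers and SRAM, set up the
 * DMA pools, clocks and interrupts of each engine, then register the
 * supported algorithms.
 */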
static int mv_cesa_probe(struct platform_device *pdev)
{
        const struct mv_cesa_caps *caps = &orion_caps;
        const struct mbus_dram_target_info *dram;
        const struct of_device_id *match;
        struct device *dev = &pdev->dev;
        struct mv_cesa_dev *cesa;
        struct mv_cesa_engine *engines;
        struct resource *res;
        int irq, ret, i;
        u32 sram_size;

        if (cesa_dev) {
                dev_err(&pdev->dev, "Only one CESA device authorized\n");
                return -EEXIST;
        }

        if (dev->of_node) {
                match = of_match_node(mv_cesa_of_match_table, dev->of_node);
                if (!match || !match->data)
                        return -ENOTSUPP;

                caps = match->data;
        }

        if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
                return -ENOTSUPP;

        cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
        if (!cesa)
                return -ENOMEM;

        cesa->caps = caps;
        cesa->dev = dev;

        sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
        of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
                             &sram_size);
        if (sram_size < CESA_SA_MIN_SRAM_SIZE)
                sram_size = CESA_SA_MIN_SRAM_SIZE;

        cesa->sram_size = sram_size;
        cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
                                     GFP_KERNEL);
        if (!cesa->engines)
                return -ENOMEM;

        spin_lock_init(&cesa->lock);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        cesa->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(cesa->regs))
                return PTR_ERR(cesa->regs);

        ret = mv_cesa_dev_dma_init(cesa);
        if (ret)
                return ret;

        dram = mv_mbus_dram_info_nooverlap();

        platform_set_drvdata(pdev, cesa);

        for (i = 0; i < caps->nengines; i++) {
                struct mv_cesa_engine *engine = &cesa->engines[i];
                char res_name[7];

                engine->id = i;
                spin_lock_init(&engine->lock);

                ret = mv_cesa_get_sram(pdev, i);
                if (ret)
                        goto err_cleanup;

                irq = platform_get_irq(pdev, i);
                if (irq < 0) {
                        ret = irq;
                        goto err_cleanup;
                }

                /*
                 * Not all platforms can gate the CESA clocks: do not complain
                 * if the clock does not exist.
                 */
                snprintf(res_name, sizeof(res_name), "cesa%d", i);
                engine->clk = devm_clk_get(dev, res_name);
                if (IS_ERR(engine->clk)) {
                        engine->clk = devm_clk_get(dev, NULL);
                        if (IS_ERR(engine->clk))
                                engine->clk = NULL;
                }

                snprintf(res_name, sizeof(res_name), "cesaz%d", i);
                engine->zclk = devm_clk_get(dev, res_name);
                if (IS_ERR(engine->zclk))
                        engine->zclk = NULL;

                ret = clk_prepare_enable(engine->clk);
                if (ret)
                        goto err_cleanup;

                ret = clk_prepare_enable(engine->zclk);
                if (ret)
                        goto err_cleanup;

                engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

                if (dram && cesa->caps->has_tdma)
                        mv_cesa_conf_mbus_windows(engine, dram);

                writel(0, engine->regs + CESA_SA_INT_STATUS);
                writel(CESA_SA_CFG_STOP_DIG_ERR,
                       engine->regs + CESA_SA_CFG);
                writel(engine->sram_dma & CESA_SA_SRAM_MSK,
                       engine->regs + CESA_SA_DESC_P0);

                ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
                                                IRQF_ONESHOT,
                                                dev_name(&pdev->dev),
                                                engine);
                if (ret)
                        goto err_cleanup;

                crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
                atomic_set(&engine->load, 0);
                INIT_LIST_HEAD(&engine->complete_queue);
        }

        cesa_dev = cesa;

        ret = mv_cesa_add_algs(cesa);
        if (ret) {
                cesa_dev = NULL;
                goto err_cleanup;
        }

        dev_info(dev, "CESA device successfully registered\n");

        return 0;

err_cleanup:
        for (i = 0; i < caps->nengines; i++) {
                clk_disable_unprepare(cesa->engines[i].zclk);
                clk_disable_unprepare(cesa->engines[i].clk);
                mv_cesa_put_sram(pdev, i);
        }

        return ret;
}

static int mv_cesa_remove(struct platform_device *pdev)
{
        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
        int i;

        mv_cesa_remove_algs(cesa);

        for (i = 0; i < cesa->caps->nengines; i++) {
                clk_disable_unprepare(cesa->engines[i].zclk);
                clk_disable_unprepare(cesa->engines[i].clk);
                mv_cesa_put_sram(pdev, i);
        }

        return 0;
}

static struct platform_driver marvell_cesa = {
        .probe = mv_cesa_probe,
        .remove = mv_cesa_remove,
        .driver = {
                .name = "marvell-cesa",
                .of_match_table = mv_cesa_of_match_table,
        },
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");