rk3288_crypto.c

/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers.
 */
#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>
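
/*
 * Enable the four crypto clocks (sclk, aclk, hclk, dmaclk). On failure,
 * every clock that was already enabled is unwound in reverse order before
 * the error is returned.
 */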
static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
	int err;

	err = clk_prepare_enable(dev->sclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
			__func__, __LINE__);
		goto err_return;
	}
	err = clk_prepare_enable(dev->aclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
			__func__, __LINE__);
		goto err_aclk;
	}
	err = clk_prepare_enable(dev->hclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
			__func__, __LINE__);
		goto err_hclk;
	}
	err = clk_prepare_enable(dev->dmaclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
			__func__, __LINE__);
		goto err_dmaclk;
	}
	return err;

err_dmaclk:
	clk_disable_unprepare(dev->hclk);
err_hclk:
	clk_disable_unprepare(dev->aclk);
err_aclk:
	clk_disable_unprepare(dev->sclk);
err_return:
	return err;
}
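
/* Disable the crypto clocks in the reverse order of rk_crypto_enable_clk(). */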
static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}
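
/*
 * Check whether src (and, if present, dst) can be fed to the DMA engine
 * directly: the scatterlist offsets must be 4-byte aligned, the lengths
 * must be aligned to align_mask, and src and dst must have equal lengths.
 */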
static int check_alignment(struct scatterlist *sg_src,
			   struct scatterlist *sg_dst,
			   int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
	if (!sg_dst)
		return in;
	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}
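
/*
 * Map the next chunk of the current request for DMA. dev->aligned is
 * sticky: once a scatterlist entry fails the alignment check, the rest of
 * the request is bounced through the buffer at dev->addr_vir, one
 * PAGE_SIZE chunk at a time, and that same bounce buffer is mapped in both
 * directions so the engine operates on it in place.
 */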
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}
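
/*
 * Undo the DMA mappings set up by rk_load_data() for the chunk that just
 * completed, picking either the real scatterlists or the bounce buffer
 * depending on whether the chunk took the aligned path.
 */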
static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}
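
/*
 * Interrupt handler: acknowledge the interrupt by writing the status bits
 * back to RK_CRYPTO_INTSTS, flag DMA errors (bits 1 and 3 of the status
 * word), and defer the actual completion work to the done tasklet.
 */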
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);

	if (interrupt_status & 0x0a) {
		dev_warn(dev->dev, "DMA Error\n");
		dev->err = -EFAULT;
	}
	tasklet_schedule(&dev->done_task);

	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
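
/*
 * Queue an asynchronous request. If the engine is idle, mark it busy and
 * kick the queue tasklet to start processing; otherwise the request simply
 * waits in the queue.
 */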
static int rk_crypto_enqueue(struct rk_crypto_info *dev,
			     struct crypto_async_request *async_req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, async_req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return ret;
	}
	dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, flags);
	tasklet_schedule(&dev->queue_task);

	return ret;
}
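
/*
 * Queue tasklet: dequeue the next request, notify any backlogged request
 * that it is now in progress, and hand the new request to the algorithm's
 * start() hook. A start() failure is reported through complete().
 */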
static void rk_crypto_queue_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	dev->err = 0;
	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	dev->async_req = async_req;
	err = dev->start(dev);
	if (err)
		dev->complete(dev->async_req, err);
}
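
/*
 * Done tasklet: runs after each interrupt. Either propagate an error left
 * by the IRQ handler, or let the algorithm's update() hook load the next
 * chunk (or finish the request).
 */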
static void rk_crypto_done_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;

	if (dev->err) {
		dev->complete(dev->async_req, dev->err);
		return;
	}

	dev->err = dev->update(dev);
	if (dev->err)
		dev->complete(dev->async_req, dev->err);
}
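
/*
 * The algorithms this driver exposes to the crypto API: AES, DES and
 * 3DES-EDE in ECB and CBC modes, plus SHA-1, SHA-256 and MD5 hashes.
 */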
static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};
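
/*
 * Register every entry of rk_cipher_algs[] with the crypto API, using the
 * cipher or ahash registration call depending on the entry's type. On
 * failure, unregister the entries that were already registered.
 */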
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_alg(
					&rk_cipher_algs[i]->alg.crypto);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	/* Unwind with index k, not the failing index i. */
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}

static void rk_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
	}
}
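
/*
 * devm action registered in probe: put the block back into reset when the
 * device is unbound (or when probe itself fails after this point).
 */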
static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}

static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);
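
/*
 * Probe pulls its resources from the device tree. A minimal sketch of a
 * matching node follows; the clock, reset and IRQ names are the ones this
 * driver looks up, while the address, interrupt number and clock/reset
 * specifiers are placeholders that depend on the SoC dtsi:
 *
 *	crypto: crypto@ff8a0000 {
 *		compatible = "rockchip,rk3288-crypto";
 *		reg = <0xff8a0000 0x4000>;
 *		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
 *			 <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
 *		clock-names = "aclk", "hclk", "sclk", "apb_pclk";
 *		resets = <&cru SRST_CRYPTO>;
 *		reset-names = "crypto-rst";
 *	};
 */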
static int rk_crypto_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}
	/* Set this before any dev_warn()/dev_err() on crypto_info->dev. */
	crypto_info->dev = &pdev->dev;

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		dev_warn(crypto_info->dev,
			 "control interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto", pdev);
	if (err) {
		dev_err(crypto_info->dev, "irq request failed.\n");
		goto err_crypto;
	}

	platform_set_drvdata(pdev, crypto_info);

	tasklet_init(&crypto_info->queue_task,
		     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
	tasklet_init(&crypto_info->done_task,
		     rk_crypto_done_task_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;
	crypto_info->enqueue = rk_crypto_enqueue;
	crypto_info->busy = false;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "err in register alg\n");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->queue_task);
	tasklet_kill(&crypto_info->done_task);
err_crypto:
	return err;
}

static int rk_crypto_remove(struct platform_device *pdev)
{
	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

	rk_crypto_unregister();
	tasklet_kill(&crypto_tmp->done_task);
	tasklet_kill(&crypto_tmp->queue_task);
	return 0;
}

static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};
module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");