safexcel_cipher.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>

#include "safexcel.h"
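
/*
 * Per-tfm cipher state: the shared safexcel base context (ring, send and
 * result handlers), the AES key words and key length, the cipher mode and
 * the direction of the current operation.
 */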
enum safexcel_cipher_direction {
        SAFEXCEL_ENCRYPT,
        SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        enum safexcel_cipher_direction direction;
        u32 mode;

        __le32 key[8];
        unsigned int key_len;
};
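
/*
 * Build the EIP197 token for a cipher request: for CBC mode the IV is
 * first copied into the command descriptor token area, then a single
 * DIRECTION instruction covering the whole payload is emitted.
 */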
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
                                  struct crypto_async_request *async,
                                  struct safexcel_command_desc *cdesc,
                                  u32 length)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_token *token;
        unsigned offset = 0;

        if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
                offset = AES_BLOCK_SIZE / sizeof(u32);
                memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

                cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
        }

        token = (struct safexcel_token *)(cdesc->control_data.token + offset);

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
        token[0].instructions = EIP197_TOKEN_INS_LAST |
                                EIP197_TOKEN_INS_TYPE_CRYTO |
                                EIP197_TOKEN_INS_TYPE_OUTPUT;
}
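
/*
 * Expand and cache the AES key in the tfm context. If the key differs from
 * the one already cached, flag the hardware context for invalidation.
 */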
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
                               unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aes_ctx aes;
        int ret, i;

        ret = crypto_aes_expand_key(&aes, key, len);
        if (ret) {
                crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return ret;
        }

        for (i = 0; i < len / sizeof(u32); i++) {
                if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
                        ctx->base.needs_inv = true;
                        break;
                }
        }

        for (i = 0; i < len / sizeof(u32); i++)
                ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

        ctx->key_len = len;

        memzero_explicit(&aes, sizeof(aes));
        return 0;
}
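
/*
 * Fill in the context control words of a command descriptor: direction,
 * cipher mode and AES key size, which also determines the context size.
 */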
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
                                    struct safexcel_command_desc *cdesc)
{
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ctrl_size;

        if (ctx->direction == SAFEXCEL_ENCRYPT)
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
        else
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
        cdesc->control_data.control1 |= ctx->mode;

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
                ctrl_size = 4;
                break;
        case AES_KEYSIZE_192:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
                ctrl_size = 6;
                break;
        case AES_KEYSIZE_256:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
                ctrl_size = 8;
                break;
        default:
                dev_err(priv->dev, "aes keysize not supported: %u\n",
                        ctx->key_len);
                return -EINVAL;
        }
        cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

        return 0;
}
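
/*
 * Walk the result descriptor ring for a completed cipher request, report
 * any descriptor error codes and unmap the source/destination scatterlists.
 */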
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: result: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev,
                                "cipher: result: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        *should_complete = true;

        return ndesc;
}
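
/*
 * DMA-map the request buffers, then walk the source scatterlist building
 * command descriptors and the destination scatterlist building result
 * descriptors. On error the ring write pointers are rolled back and the
 * buffers unmapped.
 */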
static int safexcel_aes_send(struct crypto_async_request *async,
                             int ring, struct safexcel_request *request,
                             int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
        int i, ret = 0;

        if (req->src == req->dst) {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_BIDIRECTIONAL);
                nr_dst = nr_src;
                if (!nr_src)
                        return -EINVAL;
        } else {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_TO_DEVICE);
                if (!nr_src)
                        return -EINVAL;

                nr_dst = dma_map_sg(priv->dev, req->dst,
                                    sg_nents_for_len(req->dst, req->cryptlen),
                                    DMA_FROM_DEVICE);
                if (!nr_dst) {
                        dma_unmap_sg(priv->dev, req->src,
                                     sg_nents_for_len(req->src, req->cryptlen),
                                     DMA_TO_DEVICE);
                        return -EINVAL;
                }
        }

        memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* command descriptors */
        for_each_sg(req->src, sg, nr_src, i) {
                int len = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - len < 0)
                        len = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
                                           sg_dma_address(sg), len, req->cryptlen,
                                           ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        /* No space left in the command descriptor ring */
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1) {
                        safexcel_context_control(ctx, cdesc);
                        safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
                }

                queued -= len;
                if (!queued)
                        break;
        }

        /* result descriptors */
        for_each_sg(req->dst, sg, nr_dst, i) {
                bool first = !i, last = (i == nr_dst - 1);
                u32 len = sg_dma_len(sg);

                rdesc = safexcel_add_rdesc(priv, ring, first, last,
                                           sg_dma_address(sg), len);
                if (IS_ERR(rdesc)) {
                        /* No space left in the result descriptor ring */
                        ret = PTR_ERR(rdesc);
                        goto rdesc_rollback;
                }
                n_rdesc++;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        request->req = &req->base;
        ctx->base.handle_result = safexcel_handle_result;

        *commands = n_cdesc;
        *results = n_rdesc;
        return 0;

rdesc_rollback:
        for (i = 0; i < n_rdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        return ret;
}
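
/*
 * Handle the result of a context invalidation. When the tfm is being torn
 * down the context record is freed; otherwise the original request is
 * re-queued on a freshly selected ring.
 */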
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0, enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: invalidate: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;

                return ndesc;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;
        ctx->base.needs_inv = false;
        ctx->base.send = safexcel_aes_send;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        *should_complete = false;

        return ndesc;
}
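
/*
 * Queue a context invalidation command for this tfm; it consumes a single
 * command descriptor and a single result descriptor.
 */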
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
                                    int ring, struct safexcel_request *request,
                                    int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        ctx->base.handle_result = safexcel_handle_inv_result;

        ret = safexcel_invalidate_cache(async, &ctx->base, priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}
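
/*
 * Synchronously invalidate the hardware context on tfm teardown: a dummy
 * skcipher request is queued and the caller waits for its completion.
 */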
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct skcipher_request req;
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(&req, 0, sizeof(struct skcipher_request));

        /* create invalidation request */
        init_completion(&result.completion);
        skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      safexcel_inv_complete, &result);

        skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
        ctx = crypto_tfm_ctx(req.base.tfm);
        ctx->base.exit_inv = true;
        ctx->base.send = safexcel_cipher_send_inv;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        wait_for_completion_interruptible(&result.completion);

        if (result.error) {
                dev_warn(priv->dev,
                         "cipher: sync: invalidate: completion error %d\n",
                         result.error);
                return result.error;
        }

        return 0;
}
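
/*
 * Common entry point for the encrypt/decrypt paths: record the direction
 * and mode, allocate the hardware context (or schedule its invalidation if
 * the key changed) and queue the request on a ring.
 */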
static int safexcel_aes(struct skcipher_request *req,
                        enum safexcel_cipher_direction dir, u32 mode)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        ctx->direction = dir;
        ctx->mode = mode;

        if (ctx->base.ctxr) {
                if (ctx->base.needs_inv)
                        ctx->base.send = safexcel_cipher_send_inv;
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.send = safexcel_aes_send;

                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(req->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(tfm->__crt_alg, struct safexcel_alg_template,
                             alg.skcipher.base);

        ctx->priv = tmpl->priv;

        return 0;
}

static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        memzero_explicit(ctx->key, 8 * sizeof(u32));

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

        ret = safexcel_cipher_exit_inv(tfm);
        if (ret)
                dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
}
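
/* ecb(aes) skcipher registered with the crypto API by this driver. */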
struct safexcel_alg_template safexcel_alg_ecb_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_ecb_aes_encrypt,
                .decrypt = safexcel_ecb_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "safexcel-ecb-aes",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_alignmask = 0,
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}
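
/* cbc(aes) skcipher registered with the crypto API by this driver. */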
struct safexcel_alg_template safexcel_alg_cbc_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_cbc_aes_encrypt,
                .decrypt = safexcel_cbc_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "safexcel-cbc-aes",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_alignmask = 0,
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};