virtio_crypto_algs.c

/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
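
/* Per-transform context: the bound device plus one session per direction */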
struct virtio_crypto_ablkcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
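
/* Per-request context, stored in the ablkcipher request's ctx area */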
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	uint8_t *iv;

	/* Encryption? */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct crypto_alg algo;
};

/*
 * The algs_lock protects the below global virtio_crypto_algs
 * and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);

static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);
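
/*
 * Data-virtqueue completion callback: translate the device status into
 * an errno and finalize the request on the crypto engine.
 */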
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
						      ablk_req, error);
	}
}
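
/* Sum the byte lengths of every entry in a scatterlist */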
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
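
/* Only AES-CBC is supported: map the key length to the virtio algo id */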
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
			key_len);
		return -EINVAL;
	}
	return 0;
}
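
/*
 * Issue a CREATE_SESSION request on the control virtqueue and busy-wait
 * for the device to hand back a session id for the given direction.
 */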
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	memcpy(cipher_key, key, keylen);

	spin_lock(&vcrypto->ctrl_lock);
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Fill in the cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * We trap into the hypervisor here, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}
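
/* Issue a DESTROY_SESSION request for one direction's session */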
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return the device status back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
			num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			vcrypto->ctrl_status.status,
			destroy_session->session_id);

		return -EINVAL;
	}

	spin_unlock(&vcrypto->ctrl_lock);
	return 0;
}
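
/* Create both the encryption and the decryption session for a new key */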
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/* Note: entry points for the kernel crypto API start here */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node,
				      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
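
/*
 * Build the data request (outhdr + IV + src sgs + dst sgs + status) and
 * add it to the data virtqueue.
 */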
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		pr_err("virtio_crypto: Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * IV: avoid DMA from the stack, use a dynamically-allocated
	 * buffer instead.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
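
/*
 * Entry points from the crypto API: record the request context and hand
 * the request off to the crypto engine (the first data virtqueue for now).
 */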
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
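
/* tfm init/exit: set the request context size and the engine callbacks */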
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
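
/* Crypto engine callback: actually submit the request to the device */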
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}
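
/* Complete the request on the engine and release per-request resources */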
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}
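
/* Algorithms advertised to the crypto API; only cbc(aes) for now */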
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "virtio_crypto_aes_cbc",
		.cra_priority = 150,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_init = virtio_crypto_ablkcipher_init,
		.cra_exit = virtio_crypto_ablkcipher_exit,
		.cra_u = {
			.ablkcipher = {
				.setkey = virtio_crypto_ablkcipher_setkey,
				.decrypt = virtio_crypto_ablkcipher_decrypt,
				.encrypt = virtio_crypto_ablkcipher_encrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			},
		},
	},
} };
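
/* Register each supported algorithm when the first capable device appears */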
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}
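
/* Drop one device's reference; unregister an algorithm when the last goes */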
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_alg(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}