virtio_crypto_algs.c

/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

/*
 * The algs_lock protects the global virtio_crypto_active_devs below
 * and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
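
/* Walk a scatterlist and return the total data length in bytes. */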
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
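
/*
 * Map an AES key length (128/192/256 bit) to the virtio-crypto
 * algorithm identifier; only AES-CBC is supported here.
 */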
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
			key_len);
		return -EINVAL;
	}
	return 0;
}
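
/*
 * Send a CREATE_SESSION request on the control virtqueue, busy-waiting
 * until the device answers, and store the returned session id in the
 * encryption or decryption session info of @ctx.
 */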
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	memcpy(cipher_key, key, keylen);

	spin_lock(&vcrypto->ctrl_lock);
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * Trapping into the hypervisor, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}
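
/*
 * Send a DESTROY_SESSION request on the control virtqueue for the
 * encryption or decryption session previously created for @ctx.
 */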
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
			num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			vcrypto->ctrl_status.status,
			destroy_session->session_id);

		return -EINVAL;
	}

	spin_unlock(&vcrypto->ctrl_lock);
	return 0;
}
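
/*
 * Validate the key length and create one encryption and one decryption
 * session on the device; if the second creation fails, the encryption
 * session is torn down again so no half-initialized state is left behind.
 */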
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/* Note: kernel crypto API implementation */
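/*
 * setkey: on first use, bind the tfm to a virtio-crypto device close to
 * the current NUMA node; on rekeying, close the old sessions first, then
 * create fresh encryption/decryption sessions for the new key.
 */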
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
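
/*
 * Build one virtio data request for the cipher operation: a common
 * header (outhdr), the IV, the source scatterlist entries, the
 * destination scatterlist entries and a status buffer are chained as
 * out/in sgs and added to the data virtqueue.
 */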
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}

	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
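
/*
 * Encrypt entry point of the ablkcipher: record the context and the
 * chosen data queue in the per-request state and let the crypto engine
 * schedule the actual submission.
 */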
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->ablkcipher_ctx = ctx;
	vc_req->ablkcipher_req = req;
	vc_req->encrypt = true;
	vc_req->dataq = data_vq;

	return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->ablkcipher_ctx = ctx;
	vc_req->ablkcipher_req = req;
	vc_req->encrypt = false;
	vc_req->dataq = data_vq;

	return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
	ctx->tfm = tfm;

	return 0;
}

static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
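
/*
 * Called back by the crypto engine to actually submit a queued request
 * to the device.
 */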
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine,
	struct ablkcipher_request *req)
{
	struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_request *vc_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_cipher_request(vc_req->dataq->engine, req, err);
	virtcrypto_clear_request(vc_req);
}
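
/*
 * The algorithm advertised to the kernel crypto API: an asynchronous
 * "cbc(aes)" implementation backed by the virtio-crypto device.
 */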
static struct crypto_alg virtio_crypto_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "virtio_crypto_aes_cbc",
	.cra_priority = 150,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_type = &crypto_ablkcipher_type,
	.cra_init = virtio_crypto_ablkcipher_init,
	.cra_exit = virtio_crypto_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = virtio_crypto_ablkcipher_setkey,
			.decrypt = virtio_crypto_ablkcipher_decrypt,
			.encrypt = virtio_crypto_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
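
/*
 * The algorithms are registered when the first virtio-crypto device
 * appears and unregistered when the last one goes away;
 * virtio_crypto_active_devs keeps the reference count under algs_lock.
 */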
int virtio_crypto_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++virtio_crypto_active_devs != 1)
		goto unlock;

	ret = crypto_register_algs(virtio_crypto_algs,
			ARRAY_SIZE(virtio_crypto_algs));
	if (ret)
		virtio_crypto_active_devs--;

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--virtio_crypto_active_devs != 0)
		goto unlock;

	crypto_unregister_algs(virtio_crypto_algs,
			ARRAY_SIZE(virtio_crypto_algs));

unlock:
	mutex_unlock(&algs_lock);
}
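
/*
 * For reference, a minimal sketch of how a kernel-side user might reach
 * this implementation through the (legacy) ablkcipher API of the same
 * era.  This is illustrative only, not part of the driver: the name
 * my_complete_cb is a hypothetical completion callback, the key, IV and
 * data buffers would have to live in DMA-able (e.g. kmalloc'd) memory,
 * and on newer kernels the equivalent skcipher calls would be used
 * instead:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	struct scatterlist sg;
 *	int err;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, len);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete_cb, NULL);
 *	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *
 *	err = crypto_ablkcipher_encrypt(req);
 *	(-EINPROGRESS means completion is reported through my_complete_cb)
 *
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */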