rsa-pkcs1pad.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628
/*
 * RSA padding templates.
 *
 * Copyright (c) 2015 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
  11. #include <crypto/algapi.h>
  12. #include <crypto/akcipher.h>
  13. #include <crypto/internal/akcipher.h>
  14. #include <linux/err.h>
  15. #include <linux/init.h>
  16. #include <linux/kernel.h>
  17. #include <linux/module.h>
  18. #include <linux/random.h>
struct pkcs1pad_ctx {
	struct crypto_akcipher *child;	/* wrapped "rsa" transform doing the raw modexp */
	unsigned int key_size;		/* RSA modulus size in bytes; 0 until a key is set */
};
struct pkcs1pad_request {
	struct akcipher_request child_req;	/* request forwarded to the child "rsa" tfm */
	/*
	 * in_sg: padding prefix (up to two page-split segments) chained to the
	 * caller's source sg; out_sg: bounce buffer for the child's output so
	 * missing leading zeros can be reinstated / padding stripped.
	 */
	struct scatterlist in_sg[3], out_sg[2];
	uint8_t *in_buf, *out_buf;	/* kmalloc'd padding / output bounce buffers */
};
  28. static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
  29. unsigned int keylen)
  30. {
  31. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  32. int err, size;
  33. err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
  34. if (!err) {
  35. /* Find out new modulus size from rsa implementation */
  36. size = crypto_akcipher_maxsize(ctx->child);
  37. ctx->key_size = size > 0 ? size : 0;
  38. if (size <= 0)
  39. err = size;
  40. }
  41. return err;
  42. }
  43. static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
  44. unsigned int keylen)
  45. {
  46. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  47. int err, size;
  48. err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
  49. if (!err) {
  50. /* Find out new modulus size from rsa implementation */
  51. size = crypto_akcipher_maxsize(ctx->child);
  52. ctx->key_size = size > 0 ? size : 0;
  53. if (size <= 0)
  54. err = size;
  55. }
  56. return err;
  57. }
  58. static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
  59. {
  60. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  61. /*
  62. * The maximum destination buffer size for the encrypt/sign operations
  63. * will be the same as for RSA, even though it's smaller for
  64. * decrypt/verify.
  65. */
  66. return ctx->key_size ?: -EINVAL;
  67. }
/*
 * Build a scatterlist over a kmalloc'd buffer, splitting it at a page
 * boundary when necessary, and optionally chain the caller's scatterlist
 * behind it.
 *
 * NOTE(review): this assumes the buffer spans at most two pages (two sg
 * entries) — presumably guaranteed by the key_size > PAGE_SIZE rejections
 * in the callers; confirm if those checks ever change.
 */
static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
		struct scatterlist *next)
{
	int nsegs = next ? 1 : 0;	/* reserve one slot for the chain link */

	if (offset_in_page(buf) + len <= PAGE_SIZE) {
		/* Buffer fits within a single page: one data segment. */
		nsegs += 1;
		sg_init_table(sg, nsegs);
		sg_set_buf(sg, buf, len);
	} else {
		/* Buffer crosses one page boundary: split into two segments. */
		nsegs += 2;
		sg_init_table(sg, nsegs);
		sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
		sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
				offset_in_page(buf) + len - PAGE_SIZE);
	}

	if (next)
		sg_chain(sg, nsegs, next);
}
/*
 * Completion for encrypt/sign: the child's RSA result may be shorter than
 * the modulus, so zero the leading pad_len bytes of the caller's
 * destination and copy the child's output in behind them, making the
 * caller always see exactly key_size bytes.  Frees the request's
 * temporary buffers on both success and failure.
 */
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	/* Number of leading zero bytes the child's result lacks. */
	size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
	size_t chunk_len, pad_left;
	struct sg_mapping_iter miter;

	if (!err) {
		if (pad_len) {
			/* memset the first pad_len bytes of req->dst to 0. */
			sg_miter_start(&miter, req->dst,
					sg_nents_for_len(req->dst, pad_len),
					SG_MITER_ATOMIC | SG_MITER_TO_SG);

			pad_left = pad_len;
			while (pad_left) {
				sg_miter_next(&miter);

				/* A miter segment may be shorter than pad_left. */
				chunk_len = min(miter.length, pad_left);
				memset(miter.addr, 0, chunk_len);
				pad_left -= chunk_len;
			}

			sg_miter_stop(&miter);
		}

		/* Place the child's output right after the zero padding. */
		sg_pcopy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, ctx->key_size),
				req_ctx->out_buf, req_ctx->child_req.dst_len,
				pad_len);
	}
	req->dst_len = ctx->key_size;

	kfree(req_ctx->in_buf);
	kzfree(req_ctx->out_buf);	/* may hold key-dependent data; zero on free */

	return err;
}
  118. static void pkcs1pad_encrypt_sign_complete_cb(
  119. struct crypto_async_request *child_async_req, int err)
  120. {
  121. struct akcipher_request *req = child_async_req->data;
  122. struct crypto_async_request async_req;
  123. if (err == -EINPROGRESS)
  124. return;
  125. async_req.data = req->base.data;
  126. async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
  127. async_req.flags = child_async_req->flags;
  128. req->base.complete(&async_req,
  129. pkcs1pad_encrypt_sign_complete(req, err));
  130. }
/*
 * RSAES-PKCS1-v1_5 encryption: build the padding prefix
 * 0x02 || PS || 0x00 in front of the caller's plaintext (the standard's
 * leading 0x00 octet is implicit because the child is fed only
 * key_size - 1 bytes) and run the child rsa encrypt into a bounce
 * buffer; the completion handler restores any missing leading zeros.
 */
static int pkcs1pad_encrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int i, ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	/* Padding needs at least 11 bytes: 0x00, 0x02, >= 8 PS bytes, 0x00. */
	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		/* Tell the caller how much room is actually required. */
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	/* pkcs1pad_sg_set_buf() handles at most one page-boundary split. */
	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/*
	 * Replace both input and output to add the padding in the input and
	 * the potential missing leading zeros in the output.
	 */
	req_ctx->child_req.src = req_ctx->in_sg;
	req_ctx->child_req.src_len = ctx->key_size - 1;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	/* in_buf = 0x02 || PS (nonzero random) || 0x00; message follows via sg chain. */
	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x02;
	for (i = 1; i < ps_end; i++)
		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);	/* 1..255, never zero */
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			ctx->key_size - 1 - req->src_len, req->src);

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf) {
		kfree(req_ctx->in_buf);
		return -ENOMEM;
	}

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_encrypt_sign_complete_cb, req);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	/* Synchronous completion (including sync errors) is finished inline. */
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}
/*
 * Completion for decrypt: validate and strip the EME-PKCS1-v1_5 padding
 * 0x02 || PS || 0x00 || M from the child's output (the leading 0x00 was
 * already implied by requesting key_size - 1 bytes) and copy the message
 * to the caller's destination.
 *
 * NOTE(review): these checks branch on secret-dependent bytes, so their
 * timing could expose a Bleichenbacher-style padding oracle — presumably
 * acceptable for in-kernel callers; confirm before exposing to untrusted
 * peers.
 */
static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pos;

	if (err == -EOVERFLOW)
		/* Decrypted value had no leading 0 byte */
		err = -EINVAL;

	if (err)
		goto done;

	/* The child must deliver exactly key_size - 1 bytes. */
	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
		err = -EINVAL;
		goto done;
	}

	/* Block type must be 02 for encryption padding. */
	if (req_ctx->out_buf[0] != 0x02) {
		err = -EINVAL;
		goto done;
	}
	/* Find the 0x00 separator terminating the PS string. */
	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
		if (req_ctx->out_buf[pos] == 0x00)
			break;
	/* PS must be at least 8 octets and the separator must exist. */
	if (pos < 9 || pos == req_ctx->child_req.dst_len) {
		err = -EINVAL;
		goto done;
	}
	pos++;	/* step past the 0x00 separator to the message */

	if (req->dst_len < req_ctx->child_req.dst_len - pos)
		err = -EOVERFLOW;
	/* Always report the true message length, even on -EOVERFLOW. */
	req->dst_len = req_ctx->child_req.dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				req_ctx->out_buf + pos, req->dst_len);

done:
	kzfree(req_ctx->out_buf);	/* contains plaintext; zero on free */

	return err;
}
  225. static void pkcs1pad_decrypt_complete_cb(
  226. struct crypto_async_request *child_async_req, int err)
  227. {
  228. struct akcipher_request *req = child_async_req->data;
  229. struct crypto_async_request async_req;
  230. if (err == -EINPROGRESS)
  231. return;
  232. async_req.data = req->base.data;
  233. async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
  234. async_req.flags = child_async_req->flags;
  235. req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
  236. }
  237. static int pkcs1pad_decrypt(struct akcipher_request *req)
  238. {
  239. struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
  240. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  241. struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
  242. int err;
  243. if (!ctx->key_size || req->src_len != ctx->key_size)
  244. return -EINVAL;
  245. if (ctx->key_size > PAGE_SIZE)
  246. return -ENOTSUPP;
  247. /* Reuse input buffer, output to a new buffer */
  248. req_ctx->child_req.src = req->src;
  249. req_ctx->child_req.src_len = req->src_len;
  250. req_ctx->child_req.dst = req_ctx->out_sg;
  251. req_ctx->child_req.dst_len = ctx->key_size - 1;
  252. req_ctx->out_buf = kmalloc(ctx->key_size - 1,
  253. (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  254. GFP_KERNEL : GFP_ATOMIC);
  255. if (!req_ctx->out_buf)
  256. return -ENOMEM;
  257. pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
  258. ctx->key_size - 1, NULL);
  259. akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
  260. akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
  261. pkcs1pad_decrypt_complete_cb, req);
  262. err = crypto_akcipher_decrypt(&req_ctx->child_req);
  263. if (err != -EINPROGRESS &&
  264. (err != -EBUSY ||
  265. !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
  266. return pkcs1pad_decrypt_complete(req, err);
  267. return err;
  268. }
/*
 * RSASSA-PKCS1-v1_5 signature generation: build the padding prefix
 * 0x01 || PS(0xff..) || 0x00 in front of the caller-supplied data (the
 * standard's leading 0x00 octet is implicit because the child is fed only
 * key_size - 1 bytes) and run the child rsa sign into a bounce buffer;
 * the completion handler restores any missing leading zeros.
 */
static int pkcs1pad_sign(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	/* Padding needs at least 11 bytes: 0x00, 0x01, >= 8 PS bytes, 0x00. */
	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		/* Tell the caller how much room is actually required. */
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	/* pkcs1pad_sg_set_buf() handles at most one page-boundary split. */
	if (ctx->key_size > PAGE_SIZE)
		return -ENOTSUPP;

	/*
	 * Replace both input and output to add the padding in the input and
	 * the potential missing leading zeros in the output.
	 */
	req_ctx->child_req.src = req_ctx->in_sg;
	req_ctx->child_req.src_len = ctx->key_size - 1;
	req_ctx->child_req.dst = req_ctx->out_sg;
	req_ctx->child_req.dst_len = ctx->key_size;

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	/* in_buf = 0x01 || 0xff..0xff || 0x00; message follows via sg chain. */
	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x01;
	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			ctx->key_size - 1 - req->src_len, req->src);

	req_ctx->out_buf = kmalloc(ctx->key_size,
			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC);
	if (!req_ctx->out_buf) {
		kfree(req_ctx->in_buf);
		return -ENOMEM;
	}

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
			pkcs1pad_encrypt_sign_complete_cb, req);

	err = crypto_akcipher_sign(&req_ctx->child_req);
	/* Synchronous completion (including sync errors) is finished inline. */
	if (err != -EINPROGRESS &&
			(err != -EBUSY ||
			!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}
/*
 * Completion for verify: validate and strip the EMSA-PKCS1-v1_5 padding
 * 0x01 || PS(0xff..) || 0x00 from the child's output (the leading 0x00
 * was already implied by requesting key_size - 1 bytes) and return the
 * recovered data to the caller.
 */
static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pos;

	if (err == -EOVERFLOW)
		/* Decrypted value had no leading 0 byte */
		err = -EINVAL;

	if (err)
		goto done;

	/* The child must deliver exactly key_size - 1 bytes. */
	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
		err = -EINVAL;
		goto done;
	}

	/* Block type must be 01 for signature padding. */
	if (req_ctx->out_buf[0] != 0x01) {
		err = -EINVAL;
		goto done;
	}
	/* Skip over the run of 0xff padding octets. */
	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
		if (req_ctx->out_buf[pos] != 0xff)
			break;
	/* PS must be >= 8 octets and must be terminated by a 0x00 byte. */
	if (pos < 9 || pos == req_ctx->child_req.dst_len ||
			req_ctx->out_buf[pos] != 0x00) {
		err = -EINVAL;
		goto done;
	}
	pos++;	/* step past the 0x00 separator to the payload */

	if (req->dst_len < req_ctx->child_req.dst_len - pos)
		err = -EOVERFLOW;
	/* Always report the true payload length, even on -EOVERFLOW. */
	req->dst_len = req_ctx->child_req.dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				req_ctx->out_buf + pos, req->dst_len);
done:
	kzfree(req_ctx->out_buf);	/* zero the bounce buffer on free */

	return err;
}
  363. static void pkcs1pad_verify_complete_cb(
  364. struct crypto_async_request *child_async_req, int err)
  365. {
  366. struct akcipher_request *req = child_async_req->data;
  367. struct crypto_async_request async_req;
  368. if (err == -EINPROGRESS)
  369. return;
  370. async_req.data = req->base.data;
  371. async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
  372. async_req.flags = child_async_req->flags;
  373. req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
  374. }
/*
 * The verify operation is here for completeness, similar to the
 * verification defined in RFC 2313 section 10.2, except that block type 0
 * is not accepted, as in RFC 2437.  RFC 2437 section 9.2 doesn't define
 * any operation to retrieve the DigestInfo from a signature; instead the
 * user is expected to call the sign operation to generate the expected
 * signature and compare signatures rather than message digests.
 */
  383. static int pkcs1pad_verify(struct akcipher_request *req)
  384. {
  385. struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
  386. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  387. struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
  388. int err;
  389. if (!ctx->key_size || req->src_len != ctx->key_size)
  390. return -EINVAL;
  391. if (ctx->key_size > PAGE_SIZE)
  392. return -ENOTSUPP;
  393. /* Reuse input buffer, output to a new buffer */
  394. req_ctx->child_req.src = req->src;
  395. req_ctx->child_req.src_len = req->src_len;
  396. req_ctx->child_req.dst = req_ctx->out_sg;
  397. req_ctx->child_req.dst_len = ctx->key_size - 1;
  398. req_ctx->out_buf = kmalloc(ctx->key_size - 1,
  399. (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  400. GFP_KERNEL : GFP_ATOMIC);
  401. if (!req_ctx->out_buf)
  402. return -ENOMEM;
  403. pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
  404. ctx->key_size - 1, NULL);
  405. akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
  406. akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
  407. pkcs1pad_verify_complete_cb, req);
  408. err = crypto_akcipher_verify(&req_ctx->child_req);
  409. if (err != -EINPROGRESS &&
  410. (err != -EBUSY ||
  411. !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
  412. return pkcs1pad_verify_complete(req, err);
  413. return err;
  414. }
  415. static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
  416. {
  417. struct akcipher_instance *inst = akcipher_alg_instance(tfm);
  418. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  419. struct crypto_akcipher *child_tfm;
  420. child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
  421. if (IS_ERR(child_tfm))
  422. return PTR_ERR(child_tfm);
  423. ctx->child = child_tfm;
  424. return 0;
  425. }
  426. static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
  427. {
  428. struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
  429. crypto_free_akcipher(ctx->child);
  430. }
/*
 * Instance destructor: the spawn lives inside the instance allocation,
 * so it must be dropped before the instance is freed.
 */
static void pkcs1pad_free(struct akcipher_instance *inst)
{
	struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);

	crypto_drop_akcipher(spawn);

	kfree(inst);
}
/*
 * Template instantiation for "pkcs1pad(<rsa>)": grab the underlying rsa
 * akcipher named by the template parameter, build an akcipher instance
 * that wraps it with PKCS#1 v1.5 padding, and register it.
 */
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct akcipher_instance *inst;
	struct crypto_akcipher_spawn *spawn;
	struct akcipher_alg *rsa_alg;
	const char *rsa_alg_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	/* The template must be requested as an akcipher. */
	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
		return -EINVAL;

	rsa_alg_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(rsa_alg_name))
		return PTR_ERR(rsa_alg_name);

	/* The spawn is allocated as part of the instance. */
	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = akcipher_instance_ctx(inst);
	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
			crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	rsa_alg = crypto_spawn_akcipher_alg(spawn);

	/* Build "pkcs1pad(...)" names; fail if either does not fit. */
	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name,
				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
				rsa_alg->base.cra_name) >=
			CRYPTO_MAX_ALG_NAME ||
			snprintf(inst->alg.base.cra_driver_name,
				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
				rsa_alg->base.cra_driver_name) >=
			CRYPTO_MAX_ALG_NAME)
		goto out_drop_alg;

	/* Inherit only the ASYNC flag from the underlying rsa algorithm. */
	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);

	inst->alg.init = pkcs1pad_init_tfm;
	inst->alg.exit = pkcs1pad_exit_tfm;

	inst->alg.encrypt = pkcs1pad_encrypt;
	inst->alg.decrypt = pkcs1pad_decrypt;
	inst->alg.sign = pkcs1pad_sign;
	inst->alg.verify = pkcs1pad_verify;
	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
	inst->alg.max_size = pkcs1pad_get_max_size;
	/* Room for our request context plus the child's trailing context. */
	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;

	inst->free = pkcs1pad_free;

	err = akcipher_register_instance(tmpl, inst);
	if (err)
		goto out_drop_alg;

	return 0;

out_drop_alg:
	crypto_drop_akcipher(spawn);
out_free_inst:
	kfree(inst);
	return err;
}
/* "pkcs1pad(<rsa>)" template: PKCS#1 v1.5 padding around a raw RSA akcipher. */
struct crypto_template rsa_pkcs1pad_tmpl = {
	.name = "pkcs1pad",
	.create = pkcs1pad_create,
	.module = THIS_MODULE,
};