nx-aes-gcm.c

/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
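
/*
 * Install the AES key into both control blocks: the GCM CPB used for the
 * main encrypt/decrypt operation and the GCA CPB used when only the
 * associated data needs to be hashed.
 */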
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8 *in_key,
                              unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        case AES_KEYSIZE_192:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
                break;
        case AES_KEYSIZE_256:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
                break;
        default:
                return -EINVAL;
        }

        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
        memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

        return 0;
}
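
/*
 * RFC 4106 keys carry a 4-byte nonce appended to the AES key.  Strip it
 * off, program the AES key as usual, and stash the nonce for use when
 * building the per-request IV.
 */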
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8 *in_key,
                                  unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
        char *nonce = nx_ctx->priv.gcm.nonce;
        int rc;

        if (key_len < 4)
                return -EINVAL;

        key_len -= 4;

        rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
        if (rc)
                goto out;

        memcpy(nonce, in_key + key_len, 4);
out:
        return rc;
}

static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
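
/*
 * Hash the associated data with the GCA function.  AAD of one AES block
 * or less is simply copied into @out; anything longer is walked in chunks
 * bounded by the sg and length limits, feeding out_pat back into in_pat
 * between iterations, with the final pattern returned in @out.
 */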
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
                  struct aead_request *req,
                  u8 *out,
                  unsigned int assoclen)
{
        int rc;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;
        unsigned int nbytes = assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        if (nbytes <= AES_BLOCK_SIZE) {
                scatterwalk_start(&walk, req->src);
                scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
                return 0;
        }

        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->src, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

                nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
                       csbcpb_aead->cpb.aes_gca.out_pat,
                       AES_BLOCK_SIZE);
                NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

        return rc;
}
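
/*
 * GMAC path: authenticate associated data when there is no payload to
 * encrypt or decrypt.  The control block is temporarily switched to GMAC
 * mode, the AAD is processed in chunks, and GCM mode is restored before
 * returning.
 */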
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
                unsigned int assoclen)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *nx_sg;
        unsigned int nbytes = assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        /* Set GMAC mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        /* Copy IV */
        memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->src, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                csbcpb->cpb.aes_gcm.bit_length_data = 0;
                csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        return rc;
}
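
/*
 * Zero-length message with no AAD: per the nx_wb note below, the tag
 * reduces to the encryption of the initial counter block, so run a single
 * AES ECB operation over the IV and copy the result out as the MAC.
 */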
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
                     int enc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        char out[AES_BLOCK_SIZE];
        struct nx_sg *in_sg, *out_sg;
        int len;

        /* For scenarios where the input message is zero length, AES CTR mode
         * may be used. Set the source data to be a single block (16B) of all
         * zeros, and set the input IV value to be the same as the GMAC IV
         * value. - nx_wb 4.8.1.3 */

        /* Change to ECB mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
        memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
               sizeof(csbcpb->cpb.aes_ecb.key));
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

        len = AES_BLOCK_SIZE;

        /* Encrypt the counter/IV */
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
                                 &len, nx_ctx->ap->sglen);

        if (len != AES_BLOCK_SIZE)
                return -EINVAL;

        len = sizeof(out);
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
                                  nx_ctx->ap->sglen);

        if (len != sizeof(out))
                return -EINVAL;

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

        /* Copy out the auth tag */
        memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
               crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        /*
         * The ECB key uses the same region as the GCM AAD and counter, so
         * it's safe to just fill it with zeroes.
         */
        memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

        return rc;
}
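
/*
 * Common GCM encrypt/decrypt path.  Handles the zero-length cases, hashes
 * the AAD, then walks the payload in coprocessor-sized chunks, chaining
 * the counter, pattern and S0 values between calls.  On encrypt the tag is
 * written after the ciphertext in the destination; on decrypt it is
 * compared against the tag found in the source.
 */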
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
                            unsigned int assoclen)
{
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
        unsigned int processed = 0, to_process;
        unsigned long irq_flags;
        int rc = -EINVAL;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        desc.info = rctx->iv;
        /* initialize the counter */
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

        if (nbytes == 0) {
                if (assoclen == 0)
                        rc = gcm_empty(req, &desc, enc);
                else
                        rc = gmac(req, &desc, assoclen);
                if (rc)
                        goto out;
                else
                        goto mac;
        }

        /* Process associated data */
        csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
        if (assoclen) {
                rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
                            assoclen);
                if (rc)
                        goto out;
        }

        /* Set flags for encryption */
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        if (enc) {
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
                nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
        }

        do {
                to_process = nbytes - processed;

                csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
                rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
                                       req->src, &to_process,
                                       processed + req->assoclen,
                                       csbcpb->cpb.aes_gcm.iv_or_cnt);
                if (rc)
                        goto out;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

mac:
        if (enc) {
                /* copy out the auth tag */
                scatterwalk_map_and_copy(
                        csbcpb->cpb.aes_gcm.out_pat_or_mac,
                        req->dst, req->assoclen + nbytes,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
                        SCATTERWALK_TO_SG);
        } else {
                u8 *itag = nx_ctx->priv.gcm.iauth_tag;
                u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

                scatterwalk_map_and_copy(
                        itag, req->src, req->assoclen + nbytes,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
                        SCATTERWALK_FROM_SG);
                rc = memcmp(itag, otag,
                            crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
                     -EBADMSG : 0;
        }
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}
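
/* gcm(aes) entry points: the caller supplies the 12-byte IV directly. */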
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;

        memcpy(iv, req->iv, 12);

        return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
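
/*
 * rfc4106(gcm(aes)) entry points: the full IV is the 4-byte nonce saved at
 * setkey time followed by the 8-byte IV supplied with the request;
 * req->assoclen must cover at least those 8 bytes, which are subtracted
 * before calling the common GCM path.
 */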
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        if (req->assoclen < 8)
                return -EINVAL;

        return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        if (req->assoclen < 8)
                return -EINVAL;

        return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
        .base = {
                .cra_name        = "gcm(aes)",
                .cra_driver_name = "gcm-aes-nx",
                .cra_priority    = 300,
                .cra_blocksize   = 1,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_module      = THIS_MODULE,
        },
        .init        = nx_crypto_ctx_aes_gcm_init,
        .exit        = nx_crypto_ctx_aead_exit,
        .ivsize      = 12,
        .maxauthsize = AES_BLOCK_SIZE,
        .setkey      = gcm_aes_nx_set_key,
        .encrypt     = gcm_aes_nx_encrypt,
        .decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
        .base = {
                .cra_name        = "rfc4106(gcm(aes))",
                .cra_driver_name = "rfc4106-gcm-aes-nx",
                .cra_priority    = 300,
                .cra_blocksize   = 1,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_module      = THIS_MODULE,
        },
        .init        = nx_crypto_ctx_aes_gcm_init,
        .exit        = nx_crypto_ctx_aead_exit,
        .ivsize      = 8,
        .maxauthsize = AES_BLOCK_SIZE,
        .setkey      = gcm4106_aes_nx_set_key,
        .setauthsize = gcm4106_aes_nx_setauthsize,
        .encrypt     = gcm4106_aes_nx_encrypt,
        .decrypt     = gcm4106_aes_nx_decrypt,
};