// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_sync_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_sync_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};
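
/*
 * Software fallback handling: if CPACF does not provide the function
 * code for the requested key length, the key is handed to a generic
 * fallback tfm and the context's fc stays 0, which routes all later
 * encrypt/decrypt calls through that fallback as well.
 */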

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
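
/*
 * Single-block encrypt/decrypt using the CPACF KM ("cipher message")
 * instruction, with the raw key taken directly from the context.
 */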

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-s390",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt,
		}
	}
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
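
/*
 * ECB: each walk step hands as many complete AES blocks as the mapped
 * range contains to a single KM invocation; any remainder below one
 * block is returned to the walk for the next iteration.
 */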

static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-s390",
	.cra_priority = 401,	/* combo: aes + ecb + 1 */
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ecb_aes_set_key,
			.encrypt = ecb_aes_encrypt,
			.decrypt = ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
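
/*
 * CBC uses the KMC ("cipher message with chaining") instruction; its
 * parameter block holds the chaining value (IV) followed by the key,
 * and the updated IV is copied back to the walk when done.
 */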

static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	ret = blkcipher_walk_virt(desc, walk);
	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-s390",
	.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = cbc_aes_set_key,
			.encrypt = cbc_aes_encrypt,
			.decrypt = cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
						     CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst,
				struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst,
				struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return xts_fallback_setkey(tfm, in_key, key_len);

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}
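
/*
 * XTS: the PCC ("perform cryptographic computation") instruction
 * derives the initial tweak from the IV and the second key half; the
 * result in pcc_param.xts seeds xts_param.init for the following KM
 * calls. The offset (key_len & 0x10) shifts the start of the parameter
 * block so the same structs serve both the XTS-128 and XTS-256 layouts.
 */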

static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	ret = blkcipher_walk_virt(desc, walk);
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						  CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-s390",
	.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = xts_fallback_init,
	.cra_exit = xts_fallback_exit,
	.cra_u = {
		.blkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = xts_aes_set_key,
			.encrypt = xts_aes_encrypt,
			.decrypt = xts_aes_decrypt,
		}
	}
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
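
/*
 * Fill the shared counter page with consecutive counter values so one
 * KMCTR call can process up to PAGE_SIZE bytes worth of blocks.
 */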

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}
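
/*
 * The shared counter page is guarded by ctrblk_lock; if the trylock
 * fails, processing degrades to one block per KMCTR call using the
 * IV in the walk as the counter.
 */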

static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    walk->dst.virt.addr, walk->src.virt.addr,
			    n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    buf, walk->src.virt.addr,
			    AES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-s390",
	.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ctr_aes_set_key,
			.encrypt = ctr_aes_encrypt,
			.decrypt = ctr_aes_decrypt,
		}
	}
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
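
/*
 * Scatter-gather walker for GCM: gcm_sg_walk_go() returns a linear
 * pointer to at least minbytesneeded bytes, copying data that is
 * split across scatterlist entries into a small bounce buffer (at
 * most one AES block is requested at a time).
 */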

static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			      unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	/* minbytesneeded <= AES_BLOCK_SIZE */
	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	if (!gw->walk_bytes) {
		scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		gw->walk_bytes_remain -= n;
		scatterwalk_unmap(&gw->walk);
		scatterwalk_advance(&gw->walk, n);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);

		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}

		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
		if (!gw->walk_bytes) {
			scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
			gw->walk_bytes = scatterwalk_clamp(&gw->walk,
							gw->walk_bytes_remain);
		}
		gw->walk_ptr = scatterwalk_map(&gw->walk);
	}

out:
	return gw->nbytes;
}

static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int n;

	if (gw->ptr == NULL)
		return;

	if (gw->ptr == gw->buf) {
		n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes -= n;
		} else
			gw->buf_bytes = 0;
	} else {
		gw->walk_bytes_remain -= bytesdone;
		scatterwalk_unmap(&gw->walk);
		scatterwalk_advance(&gw->walk, bytesdone);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	}
}
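
/*
 * The KMA ("cipher message with authentication") instruction consumes
 * the AAD and the plain-/ciphertext in block-aligned chunks; the
 * CPACF_KMA_LAAD and CPACF_KMA_LPC flags signal that the last portion
 * of the AAD resp. of the text is being supplied.
 */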

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_sg_walk_start(&gw_in, req->src, len);
	gcm_sg_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
		gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey = gcm_aes_setkey,
	.setauthsize = gcm_aes_setauthsize,
	.encrypt = gcm_aes_encrypt,
	.decrypt = gcm_aes_decrypt,

	.ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize = GHASH_DIGEST_SIZE,
	.chunksize = AES_BLOCK_SIZE,

	.base = {
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct s390_aes_ctx),
		.cra_priority = 900,
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-s390",
		.cra_module = THIS_MODULE,
	},
};
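
/*
 * Registered algorithms are tracked so that aes_s390_fini(), which is
 * also used on partial init failure, unregisters exactly what was
 * registered.
 */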

static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
	int ret;

	ret = crypto_register_alg(alg);
	if (!ret)
		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	while (aes_s390_algs_num--)
		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		ret = aes_s390_register_alg(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_alg(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_alg(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_alg(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");

MODULE_LICENSE("GPL");