glue_helper.c

/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */
#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

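/*
 * Dispatch note: each helper below walks the request and, for every
 * chunk, tries the handlers in gctx->funcs[] in order.  The convention
 * seen in callers of these helpers is to list the widest handler first
 * (e.g. an 8-way SIMD variant) and end with a 1-block fallback for the
 * tail, i.e. funcs[] sorted by descending num_blocks.
 */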
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);

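/*
 * Illustrative usage sketch (not part of this file): a cipher module
 * built on these helpers fills in a common_glue_ctx per mode and calls
 * the matching helper from its blkcipher hooks.  The 8-way handler
 * names below are hypothetical; the struct layout and GLUE_FUNC_CAST
 * follow asm/crypto/glue_helper.h:
 *
 *	static const struct common_glue_ctx mycipher_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_ecb_enc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_encrypt) }
 *		} }
 *	};
 *
 * CBC encryption below takes a bare common_glue_func_t rather than a
 * common_glue_ctx: each block chains on the previous ciphertext block,
 * so the encrypt side cannot be batched.
 */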
static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
					      struct blkcipher_desc *desc,
					      struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		fn(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);

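/*
 * CBC decryption, unlike encryption, is parallel-friendly: each
 * plaintext block depends only on two ciphertext blocks that are both
 * already available.  The helper below therefore walks the chunk
 * backwards, decrypting num_blocks-wide batches in place and XOR-ing
 * each result with the preceding ciphertext block.  The chunk's first
 * block is finished at "done:" with the chaining IV, and last_iv (the
 * chunk's final ciphertext block) becomes the IV for the next chunk.
 */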
static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 last_iv;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		/* Process multi-block batch */
		if (nbytes >= func_bytes) {
			do {
				nbytes -= func_bytes - bsize;
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= bsize;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, src - 1);
				src -= 1;
				dst -= 1;
			} while (nbytes >= func_bytes);
		}
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);

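/*
 * CTR tail handling: the keystream for a final partial block is
 * generated into a full u128 scratch buffer and only the remaining
 * nbytes are copied out, so short tails never over-read or over-write
 * the caller's buffers.  The counter is kept little-endian internally
 * (le128) and converted back to the big-endian on-wire IV afterwards.
 */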
static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
					struct blkcipher_desc *desc,
					struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *src = (u8 *)walk->src.virt.addr;
	u8 *dst = (u8 *)walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	le128 ctrblk;
	u128 tmp;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	memcpy(&tmp, src, nbytes);
	fn_ctr(ctx, &tmp, &tmp, &ctrblk);
	memcpy(dst, &tmp, nbytes);

	le128_to_be128((be128 *)walk->iv, &ctrblk);
}

static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	le128 ctrblk;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	le128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

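/*
 * The walk below is initialized with blkcipher_walk_virt_block() at a
 * bsize granularity, so the main loop only ever sees whole blocks; any
 * sub-block tail is left in walk.nbytes and finished afterwards by
 * glue_ctr_crypt_final_128bit() using the last (single-block) entry in
 * funcs[].
 */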
int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);

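/*
 * XTS: the tweak T is carried in walk->iv.  The batch handlers in
 * fn_u.xts are expected to advance the tweak themselves (one
 * gf128mul_x_ble() doubling per block, as glue_xts_crypt_128bit_one()
 * at the end of this file does), which is why the loops below never
 * touch the IV between calls.
 */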
static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
					    void *ctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							(le128 *)walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

/* for implementations that provide a faster XTS tweak (IV) generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes,
			  void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
			  void *tweak_ctx, void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     desc, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);

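/*
 * Same flow as glue_xts_crypt_128bit() above, but driven through the
 * newer skcipher request/walk interface instead of the legacy
 * blkcipher one.
 */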
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
					    &walk, fpu_enabled,
					    nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);

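/*
 * Single-block XTS step (xor-encrypt-xor): the output is
 * T ^ fn(T ^ input), after which the tweak T is advanced by one
 * gf128mul_x_ble() doubling for the next block.  A batch handler can
 * be built by calling this once per block with the shared tweak
 * pointer.
 */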
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
			       common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor(dst, src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, (u8 *)dst, (u8 *)dst);

	/* P <- T xor PP */
	u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");