sha1_ssse3_glue.c

/*
 * Cryptographic API.
 *
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * Supplemental SSE3 instructions.
 *
 * This file is based on sha1_generic.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>

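/*
 * Entry points of the assembler implementations. Note that "rounds" is the
 * number of complete 64-byte SHA-1 blocks to process, not the number of
 * rounds within a block.
 */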
asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
				     unsigned int rounds);
#ifdef CONFIG_AS_AVX
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
				   unsigned int rounds);
#endif
#ifdef CONFIG_AS_AVX2
#define SHA1_AVX2_BLOCK_OPTSIZE	4	/* optimal 4*64 bytes of SHA1 blocks */

asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
				    unsigned int rounds);
#endif

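/*
 * Set once at module init to the fastest transform the CPU and kernel
 * configuration support; all update/final paths go through this pointer.
 */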
static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int);

static int sha1_ssse3_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

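/*
 * Core update helper, called with the FPU already claimed. It completes a
 * leftover partial block from the context buffer first, then feeds all full
 * blocks of the new data to the assembler transform, and finally buffers
 * whatever tail remains for a later update/final call.
 */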
static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len, unsigned int partial)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int done = 0;

	sctx->count += len;

	if (partial) {
		/* Fill up and process the partially buffered block first. */
		done = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->buffer + partial, data, done);
		sha1_transform_asm(sctx->state, sctx->buffer, 1);
	}

	if (len - done >= SHA1_BLOCK_SIZE) {
		/* Hash all remaining full blocks straight from the input. */
		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;

		sha1_transform_asm(sctx->state, data + done, rounds);
		done += rounds * SHA1_BLOCK_SIZE;
	}

	/* Stash the tail (less than one block) for the next call. */
	memcpy(sctx->buffer, data + done, len - done);

	return 0;
}

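/*
 * The SIMD transforms clobber FPU/SSE register state, which may only be
 * touched when irq_fpu_usable() says so; otherwise fall back to the generic
 * C implementation via crypto_sha1_update().
 */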
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
	int res;

	/* Handle the fast case right here */
	if (partial + len < SHA1_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buffer + partial, data, len);
		return 0;
	}

	if (!irq_fpu_usable()) {
		res = crypto_sha1_update(desc, data, len);
	} else {
		kernel_fpu_begin();
		res = __sha1_ssse3_update(desc, data, len, partial);
		kernel_fpu_end();
	}

	return res;
}

/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	/* Snapshot the bit count before the padding inflates sctx->count. */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
	if (!irq_fpu_usable()) {
		crypto_sha1_update(desc, padding, padlen);
		crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_fpu_begin();
		/* We need to fill a whole block for __sha1_ssse3_update() */
		if (padlen <= 56) {
			sctx->count += padlen;
			memcpy(sctx->buffer + index, padding, padlen);
		} else {
			__sha1_ssse3_update(desc, padding, padlen, index);
		}
		__sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
		kernel_fpu_end();
	}

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

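/*
 * export/import serialize the partial hash state so the crypto API can
 * suspend a hash operation and resume it later.
 */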
static int sha1_ssse3_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

#ifdef CONFIG_AS_AVX2
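/*
 * The AVX2 transform has a higher fixed setup cost, so it only pays off for
 * inputs of at least SHA1_AVX2_BLOCK_OPTSIZE blocks; below that, the plain
 * AVX transform is used instead.
 */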
static void sha1_apply_transform_avx2(u32 *digest, const char *data,
				      unsigned int rounds)
{
	/* Select the optimal transform based on data block size */
	if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE)
		sha1_transform_avx2(digest, data, rounds);
	else
		sha1_transform_avx(digest, data, rounds);
}
#endif

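/*
 * cra_priority 150 ranks this driver above the generic C sha1
 * implementation, so the crypto core picks it by default when both are
 * registered under the "sha1" name.
 */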
static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_ssse3_init,
	.update		=	sha1_ssse3_update,
	.final		=	sha1_ssse3_final,
	.export		=	sha1_ssse3_export,
	.import		=	sha1_ssse3_import,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-ssse3",
		.cra_priority	=	150,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

#ifdef CONFIG_AS_AVX
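/*
 * AVX is only usable if the CPU supports it and the OS has enabled saving
 * of both SSE and YMM register state in XCR0 via OSXSAVE.
 */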
static bool __init avx_usable(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave)
		return false;

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");

		return false;
	}

	return true;
}

#ifdef CONFIG_AS_AVX2
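/* The AVX2 assembler code additionally relies on BMI1 and BMI2 instructions. */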
static bool __init avx2_usable(void)
{
	if (avx_usable() && cpu_has_avx2 && boot_cpu_has(X86_FEATURE_BMI1) &&
	    boot_cpu_has(X86_FEATURE_BMI2))
		return true;

	return false;
}
#endif
#endif

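/*
 * Pick the best transform the hardware supports (SSSE3 as the baseline,
 * overridden by AVX and then AVX2 where available) and register the shash
 * only if at least one of them is usable.
 */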
static int __init sha1_ssse3_mod_init(void)
{
	char *algo_name;

	/* test for SSSE3 first */
	if (cpu_has_ssse3) {
		sha1_transform_asm = sha1_transform_ssse3;
		algo_name = "SSSE3";
	}

#ifdef CONFIG_AS_AVX
	/* allow AVX to override SSSE3, it's a little faster */
	if (avx_usable()) {
		sha1_transform_asm = sha1_transform_avx;
		algo_name = "AVX";
#ifdef CONFIG_AS_AVX2
		/* allow AVX2 to override AVX, it's a little faster */
		if (avx2_usable()) {
			sha1_transform_asm = sha1_apply_transform_avx2;
			algo_name = "AVX2";
		}
#endif
	}
#endif

	if (sha1_transform_asm) {
		pr_info("Using %s optimized SHA-1 implementation\n", algo_name);
		return crypto_register_shash(&alg);
	}
	pr_info("Neither AVX nor AVX2 nor SSSE3 is available/usable.\n");

	return -ENODEV;
}

static void __exit sha1_ssse3_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS("sha1");

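/*
 * Usage sketch (illustrative, not part of the original file): callers reach
 * this driver through the generic crypto API under the "sha1" name rather
 * than by calling anything above directly. A minimal one-shot digest from a
 * sleepable context, with error handling omitted, might look like:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	char buf[sizeof(struct shash_desc) + crypto_shash_descsize(tfm)];
 *	struct shash_desc *desc = (struct shash_desc *)buf;
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	desc->tfm = tfm;
 *	desc->flags = 0;
 *	crypto_shash_digest(desc, data, len, digest);
 *	crypto_free_shash(tfm);
 */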