Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - oversize stack frames on mn10300 in sha3-generic

   - warning on old compilers in sha3-generic

   - API error in sun4i_ss_prng

   - potential deadlock in sun4i_ss_prng

   - null-pointer dereference in sha512-mb

   - endless loop when DECO acquire fails in caam

   - kernel oops when hashing empty message in talitos"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: sun4i_ss_prng - convert lock to _bh in sun4i_ss_prng_generate
  crypto: sun4i_ss_prng - fix return value of sun4i_ss_prng_generate
  crypto: caam - fix endless loop when DECO acquire fails
  crypto: sha3-generic - Use __optimize to support old compilers
  compiler-gcc.h: __nostackprotector needs gcc-4.4 and up
  compiler-gcc.h: Introduce __optimize function attribute
  crypto: sha3-generic - deal with oversize stack frames
  crypto: talitos - fix Kernel Oops on hashing an empty file
  crypto: sha512-mb - initialize pending lengths correctly
Linus Torvalds, 7 years ago · commit 178e834c47

arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c | +6 -4

@@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
 {
 	unsigned int j;
 
-	state->lens[0] = 0;
-	state->lens[1] = 1;
-	state->lens[2] = 2;
-	state->lens[3] = 3;
+	/* initially all lanes are unused */
+	state->lens[0] = 0xFFFFFFFF00000000;
+	state->lens[1] = 0xFFFFFFFF00000001;
+	state->lens[2] = 0xFFFFFFFF00000002;
+	state->lens[3] = 0xFFFFFFFF00000003;
+
 	state->unused_lanes = 0xFF03020100;
 	for (j = 0; j < 4; j++)
 		state->ldata[j].job_in_lane = NULL;
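
Each 64-bit lens[] entry packs the lane number in the low 32 bits and a pending-length field in the high 32 bits. With the old initializers 0..3, every idle lane looked like it held the shortest pending job, so the flush path could pick an empty lane and dereference its NULL job_in_lane. Writing 0xFFFFFFFF into the high half marks the lane as unused, matching the sha1-mb and sha256-mb managers. A minimal sketch of the encoding (the helper names are hypothetical, not the kernel's):

#include <stdint.h>
#include <stdbool.h>

/* High half: pending length, all-ones while the lane is idle;
 * low half: lane index. */
#define SHA512_MB_LANE_UNUSED	0xFFFFFFFFu

static inline bool lane_is_unused(uint64_t len_entry)
{
	return (uint32_t)(len_entry >> 32) == SHA512_MB_LANE_UNUSED;
}

static inline uint32_t lane_index(uint64_t len_entry)
{
	return (uint32_t)len_entry;
}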

crypto/sha3_generic.c | +118 -100

@@ -20,6 +20,20 @@
 #include <crypto/sha3.h>
 #include <asm/unaligned.h>
 
+/*
+ * On some 32-bit architectures (mn10300 and h8300), GCC ends up using
+ * over 1 KB of stack if we inline the round calculation into the loop
+ * in keccakf(). On the other hand, on 64-bit architectures with plenty
+ * of [64-bit wide] general purpose registers, not inlining it severely
+ * hurts performance. So let's use 64-bitness as a heuristic to decide
+ * whether to inline or not.
+ */
+#ifdef CONFIG_64BIT
+#define SHA3_INLINE	inline
+#else
+#define SHA3_INLINE	noinline
+#endif
+
 #define KECCAK_ROUNDS 24
 
 static const u64 keccakf_rndc[24] = {
@@ -35,111 +49,115 @@ static const u64 keccakf_rndc[24] = {
 
 /* update the state with given number of rounds */
 
-static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25])
+static SHA3_INLINE void keccakf_round(u64 st[25])
 {
 	u64 t[5], tt, bc[5];
-	int round;
 
-	for (round = 0; round < KECCAK_ROUNDS; round++) {
+	/* Theta */
+	bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
+	bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
+	bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
+	bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
+	bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
+
+	t[0] = bc[4] ^ rol64(bc[1], 1);
+	t[1] = bc[0] ^ rol64(bc[2], 1);
+	t[2] = bc[1] ^ rol64(bc[3], 1);
+	t[3] = bc[2] ^ rol64(bc[4], 1);
+	t[4] = bc[3] ^ rol64(bc[0], 1);
+
+	st[0] ^= t[0];
+
+	/* Rho Pi */
+	tt = st[1];
+	st[ 1] = rol64(st[ 6] ^ t[1], 44);
+	st[ 6] = rol64(st[ 9] ^ t[4], 20);
+	st[ 9] = rol64(st[22] ^ t[2], 61);
+	st[22] = rol64(st[14] ^ t[4], 39);
+	st[14] = rol64(st[20] ^ t[0], 18);
+	st[20] = rol64(st[ 2] ^ t[2], 62);
+	st[ 2] = rol64(st[12] ^ t[2], 43);
+	st[12] = rol64(st[13] ^ t[3], 25);
+	st[13] = rol64(st[19] ^ t[4],  8);
+	st[19] = rol64(st[23] ^ t[3], 56);
+	st[23] = rol64(st[15] ^ t[0], 41);
+	st[15] = rol64(st[ 4] ^ t[4], 27);
+	st[ 4] = rol64(st[24] ^ t[4], 14);
+	st[24] = rol64(st[21] ^ t[1],  2);
+	st[21] = rol64(st[ 8] ^ t[3], 55);
+	st[ 8] = rol64(st[16] ^ t[1], 45);
+	st[16] = rol64(st[ 5] ^ t[0], 36);
+	st[ 5] = rol64(st[ 3] ^ t[3], 28);
+	st[ 3] = rol64(st[18] ^ t[3], 21);
+	st[18] = rol64(st[17] ^ t[2], 15);
+	st[17] = rol64(st[11] ^ t[1], 10);
+	st[11] = rol64(st[ 7] ^ t[2],  6);
+	st[ 7] = rol64(st[10] ^ t[0],  3);
+	st[10] = rol64(    tt ^ t[1],  1);
+
+	/* Chi */
+	bc[ 0] = ~st[ 1] & st[ 2];
+	bc[ 1] = ~st[ 2] & st[ 3];
+	bc[ 2] = ~st[ 3] & st[ 4];
+	bc[ 3] = ~st[ 4] & st[ 0];
+	bc[ 4] = ~st[ 0] & st[ 1];
+	st[ 0] ^= bc[ 0];
+	st[ 1] ^= bc[ 1];
+	st[ 2] ^= bc[ 2];
+	st[ 3] ^= bc[ 3];
+	st[ 4] ^= bc[ 4];
+
+	bc[ 0] = ~st[ 6] & st[ 7];
+	bc[ 1] = ~st[ 7] & st[ 8];
+	bc[ 2] = ~st[ 8] & st[ 9];
+	bc[ 3] = ~st[ 9] & st[ 5];
+	bc[ 4] = ~st[ 5] & st[ 6];
+	st[ 5] ^= bc[ 0];
+	st[ 6] ^= bc[ 1];
+	st[ 7] ^= bc[ 2];
+	st[ 8] ^= bc[ 3];
+	st[ 9] ^= bc[ 4];
+
+	bc[ 0] = ~st[11] & st[12];
+	bc[ 1] = ~st[12] & st[13];
+	bc[ 2] = ~st[13] & st[14];
+	bc[ 3] = ~st[14] & st[10];
+	bc[ 4] = ~st[10] & st[11];
+	st[10] ^= bc[ 0];
+	st[11] ^= bc[ 1];
+	st[12] ^= bc[ 2];
+	st[13] ^= bc[ 3];
+	st[14] ^= bc[ 4];
+
+	bc[ 0] = ~st[16] & st[17];
+	bc[ 1] = ~st[17] & st[18];
+	bc[ 2] = ~st[18] & st[19];
+	bc[ 3] = ~st[19] & st[15];
+	bc[ 4] = ~st[15] & st[16];
+	st[15] ^= bc[ 0];
+	st[16] ^= bc[ 1];
+	st[17] ^= bc[ 2];
+	st[18] ^= bc[ 3];
+	st[19] ^= bc[ 4];
+
+	bc[ 0] = ~st[21] & st[22];
+	bc[ 1] = ~st[22] & st[23];
+	bc[ 2] = ~st[23] & st[24];
+	bc[ 3] = ~st[24] & st[20];
+	bc[ 4] = ~st[20] & st[21];
+	st[20] ^= bc[ 0];
+	st[21] ^= bc[ 1];
+	st[22] ^= bc[ 2];
+	st[23] ^= bc[ 3];
+	st[24] ^= bc[ 4];
+}
 
-		/* Theta */
-		bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
-		bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
-		bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
-		bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
-		bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
-
-		t[0] = bc[4] ^ rol64(bc[1], 1);
-		t[1] = bc[0] ^ rol64(bc[2], 1);
-		t[2] = bc[1] ^ rol64(bc[3], 1);
-		t[3] = bc[2] ^ rol64(bc[4], 1);
-		t[4] = bc[3] ^ rol64(bc[0], 1);
-
-		st[0] ^= t[0];
-
-		/* Rho Pi */
-		tt = st[1];
-		st[ 1] = rol64(st[ 6] ^ t[1], 44);
-		st[ 6] = rol64(st[ 9] ^ t[4], 20);
-		st[ 9] = rol64(st[22] ^ t[2], 61);
-		st[22] = rol64(st[14] ^ t[4], 39);
-		st[14] = rol64(st[20] ^ t[0], 18);
-		st[20] = rol64(st[ 2] ^ t[2], 62);
-		st[ 2] = rol64(st[12] ^ t[2], 43);
-		st[12] = rol64(st[13] ^ t[3], 25);
-		st[13] = rol64(st[19] ^ t[4],  8);
-		st[19] = rol64(st[23] ^ t[3], 56);
-		st[23] = rol64(st[15] ^ t[0], 41);
-		st[15] = rol64(st[ 4] ^ t[4], 27);
-		st[ 4] = rol64(st[24] ^ t[4], 14);
-		st[24] = rol64(st[21] ^ t[1],  2);
-		st[21] = rol64(st[ 8] ^ t[3], 55);
-		st[ 8] = rol64(st[16] ^ t[1], 45);
-		st[16] = rol64(st[ 5] ^ t[0], 36);
-		st[ 5] = rol64(st[ 3] ^ t[3], 28);
-		st[ 3] = rol64(st[18] ^ t[3], 21);
-		st[18] = rol64(st[17] ^ t[2], 15);
-		st[17] = rol64(st[11] ^ t[1], 10);
-		st[11] = rol64(st[ 7] ^ t[2],  6);
-		st[ 7] = rol64(st[10] ^ t[0],  3);
-		st[10] = rol64(    tt ^ t[1],  1);
-
-		/* Chi */
-		bc[ 0] = ~st[ 1] & st[ 2];
-		bc[ 1] = ~st[ 2] & st[ 3];
-		bc[ 2] = ~st[ 3] & st[ 4];
-		bc[ 3] = ~st[ 4] & st[ 0];
-		bc[ 4] = ~st[ 0] & st[ 1];
-		st[ 0] ^= bc[ 0];
-		st[ 1] ^= bc[ 1];
-		st[ 2] ^= bc[ 2];
-		st[ 3] ^= bc[ 3];
-		st[ 4] ^= bc[ 4];
-
-		bc[ 0] = ~st[ 6] & st[ 7];
-		bc[ 1] = ~st[ 7] & st[ 8];
-		bc[ 2] = ~st[ 8] & st[ 9];
-		bc[ 3] = ~st[ 9] & st[ 5];
-		bc[ 4] = ~st[ 5] & st[ 6];
-		st[ 5] ^= bc[ 0];
-		st[ 6] ^= bc[ 1];
-		st[ 7] ^= bc[ 2];
-		st[ 8] ^= bc[ 3];
-		st[ 9] ^= bc[ 4];
-
-		bc[ 0] = ~st[11] & st[12];
-		bc[ 1] = ~st[12] & st[13];
-		bc[ 2] = ~st[13] & st[14];
-		bc[ 3] = ~st[14] & st[10];
-		bc[ 4] = ~st[10] & st[11];
-		st[10] ^= bc[ 0];
-		st[11] ^= bc[ 1];
-		st[12] ^= bc[ 2];
-		st[13] ^= bc[ 3];
-		st[14] ^= bc[ 4];
-
-		bc[ 0] = ~st[16] & st[17];
-		bc[ 1] = ~st[17] & st[18];
-		bc[ 2] = ~st[18] & st[19];
-		bc[ 3] = ~st[19] & st[15];
-		bc[ 4] = ~st[15] & st[16];
-		st[15] ^= bc[ 0];
-		st[16] ^= bc[ 1];
-		st[17] ^= bc[ 2];
-		st[18] ^= bc[ 3];
-		st[19] ^= bc[ 4];
-
-		bc[ 0] = ~st[21] & st[22];
-		bc[ 1] = ~st[22] & st[23];
-		bc[ 2] = ~st[23] & st[24];
-		bc[ 3] = ~st[24] & st[20];
-		bc[ 4] = ~st[20] & st[21];
-		st[20] ^= bc[ 0];
-		st[21] ^= bc[ 1];
-		st[22] ^= bc[ 2];
-		st[23] ^= bc[ 3];
-		st[24] ^= bc[ 4];
+static void __optimize("O3") keccakf(u64 st[25])
+{
+	int round;
 
+	for (round = 0; round < KECCAK_ROUNDS; round++) {
+		keccakf_round(st);
 		/* Iota */
 		st[0] ^= keccakf_rndc[round];
 	}

drivers/crypto/caam/ctrl.c | +6 -2

@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
 		 * without any error (HW optimizations for later
 		 * CAAM eras), then try again.
 		 */
+		if (ret)
+			break;
+
 		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
 		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
-		    !(rdsta_val & (1 << sh_idx)))
+		    !(rdsta_val & (1 << sh_idx))) {
 			ret = -EAGAIN;
-		if (ret)
 			break;
+		}
+
 		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
 		/* Clear the contents before recreating the descriptor */
 		memset(desc, 0x00, CAAM_CMD_SZ * 7);
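
The caller retries instantiate_rng() only while it returns -EAGAIN, raising the TRNG entropy delay between attempts. Before this change, a hard failure from run_descriptor_deco0() (for example -ENODEV when DECO0 cannot be acquired) was overwritten with -EAGAIN by the status check, so the loop never terminated; testing ret first lets the real error escape. A simplified paraphrase of the caller's contract (not the driver's exact code; the ent_delay constants are as in drivers/crypto/caam/regs.h):

int ret;
int ent_delay = RTSDCTL_ENT_DLY_MIN;

do {
	/* -EAGAIN: the descriptor ran but the RNG state handle was not
	 * instantiated; bump the entropy delay and retry. Any other
	 * error is fatal and must end the loop. */
	ret = instantiate_rng(ctrldev, inst_handles, gen_sk);
	ent_delay += 400;
} while (ret == -EAGAIN && ent_delay < RTSDCTL_ENT_DLY_MAX);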

drivers/crypto/sunxi-ss/sun4i-ss-prng.c | +3 -3

@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
 	algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
 	ss = algt->ss;
 
-	spin_lock(&ss->slock);
+	spin_lock_bh(&ss->slock);
 
 	writel(mode, ss->base + SS_CTL);
 
@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
 	}
 
 	writel(0, ss->base + SS_CTL);
-	spin_unlock(&ss->slock);
-	return dlen;
+	spin_unlock_bh(&ss->slock);
+	return 0;
 }
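
Two distinct bugs here. First, a crypto RNG driver's generate() must return 0 on success, so returning dlen made every successful call look like an error to crypto_rng_get_bytes(). Second, the same ss->slock can be taken from softirq context, so holding it with softirqs enabled risked a deadlock; the _bh variants disable bottom halves across the critical section. A minimal kernel-side sketch of the contract the return-value fix restores:

#include <linux/err.h>
#include <crypto/rng.h>

/* Returns 0 on success with buf filled, or a negative errno; a driver
 * returning the byte count here would be reported as a failure. */
static int fill_with_prng(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}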

drivers/crypto/talitos.c | +4 -0

@@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
 
+	if (!src) {
+		to_talitos_ptr(ptr, 0, 0, is_sec1);
+		return 1;
+	}
 	if (sg_count == 1) {
 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
 		return sg_count;
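
The oops was triggered by hashing a zero-length message, which reaches talitos_sg_map() with no source scatterlist; the new branch emits a null pointer descriptor instead of dereferencing src. The empty-message path is easy to exercise from userspace through AF_ALG, as in this sketch (error handling omitted; sha256 and its 32-byte digest are just an example choice):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	unsigned char digest[32];
	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	int req;

	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	req = accept(tfm, NULL, 0);

	/* Reading without sending any data hashes the empty message */
	read(req, digest, sizeof(digest));

	for (unsigned int i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	putchar('\n');

	close(req);
	close(tfm);
	return 0;
}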

include/linux/compiler-gcc.h | +5 -2

@@ -167,8 +167,6 @@
 
 #if GCC_VERSION >= 40100
 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
-#define __nostackprotector	__attribute__((__optimize__("no-stack-protector")))
 #endif
 
 #if GCC_VERSION >= 40300
@@ -196,6 +194,11 @@
 #endif /* __CHECKER__ */
 #endif /* GCC_VERSION >= 40300 */
 
+#if GCC_VERSION >= 40400
+#define __optimize(level)	__attribute__((__optimize__(level)))
+#define __nostackprotector	__optimize("no-stack-protector")
+#endif /* GCC_VERSION >= 40400 */
+
 #if GCC_VERSION >= 40500
 
 #ifndef __CHECKER__

include/linux/compiler.h | +4 -0

@@ -277,6 +277,10 @@ unsigned long read_word_at_a_time(const void *addr)
 
 #endif /* __ASSEMBLY__ */
 
+#ifndef __optimize
+# define __optimize(level)
+#endif
+
 /* Compile time object size, -1 for unknown */
 #ifndef __compiletime_object_size
 # define __compiletime_object_size(obj) -1
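
Together, the two hunks give every compiler a working __optimize(): GCC 4.4 and later map it to the function attribute (and rebuild __nostackprotector on top of it), while older or non-GCC compilers pick up the empty fallback from compiler.h, so annotated functions such as keccakf() still build, merely without the per-function optimization level. A standalone sketch of the same fallback pattern, using raw version checks in place of the kernel's GCC_VERSION helper:

#include <stdint.h>

#if defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
#define __optimize(level)	__attribute__((__optimize__(level)))
#else
#define __optimize(level)	/* unsupported: expands to nothing */
#endif

/* Ask the compiler to build just this hot function at -O3 */
static uint64_t __optimize("O3") rotl64(uint64_t x, unsigned int n)
{
	return (x << n) | (x >> (64 - n));	/* n must be 1..63 */
}

int main(void)
{
	return rotl64(1, 8) == 256 ? 0 : 1;
}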