
crypto: vmx - disable preemption to enable vsx in aes_ctr.c

Some preemptible check warnings were reported from enable_kernel_vsx(). This
patch disables preemption in aes_ctr.c before enabling vsx, making it
consistent with the other files in the same directory.

Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Li Zhong, 8 years ago
commit 7dede913fc
1 changed file with 6 additions and 0 deletions:
  drivers/crypto/vmx/aes_ctr.c
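
For context, the change applies the standard powerpc idiom for using VSX from
kernel code: preemption and page faults must both be disabled around the
enable_kernel_vsx()/disable_kernel_vsx() pair, since enable_kernel_vsx()
contains the preemptible() check the commit message refers to, and the vector
register state it grants is per-CPU, per-task. A minimal sketch of the idiom
follows; the helper name do_vsx_work() and its body are illustrative, not part
of the patch:

#include <linux/preempt.h>   /* preempt_disable() / preempt_enable() */
#include <linux/uaccess.h>   /* pagefault_disable() / pagefault_enable() */
#include <asm/switch_to.h>   /* enable_kernel_vsx() / disable_kernel_vsx() */

/* Illustrative helper: wraps a VSX-using routine in the same
 * critical section this patch adds around each call in aes_ctr.c. */
static void do_vsx_work(void)
{
	preempt_disable();      /* task must stay on this CPU ... */
	pagefault_disable();    /* ... and must not fault while VSX state is live */
	enable_kernel_vsx();    /* hand the VSX unit to kernel code */

	/* ... call the VSX-using assembly routine here ... */

	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();       /* release in reverse order */
}

Releasing in reverse order keeps the critical sections properly nested; the
hunks below add exactly this bracketing at each of the three call sites.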

drivers/crypto/vmx/aes_ctr.c (+6, -0)

@@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 	int ret;
 	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	disable_kernel_vsx();
 	pagefault_enable();
+	preempt_enable();
 
 	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
 	return ret;
@@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
 	u8 *dst = walk->dst.virt.addr;
 	unsigned int nbytes = walk->nbytes;
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_vsx();
 	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
 	disable_kernel_vsx();
 	pagefault_enable();
+	preempt_enable();
 
 	crypto_xor(keystream, src, nbytes);
 	memcpy(dst, keystream, nbytes);
@@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+			preempt_disable();
 			pagefault_disable();
 			enable_kernel_vsx();
 			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
@@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 						    walk.iv);
 			disable_kernel_vsx();
 			pagefault_enable();
+			preempt_enable();
 
 			/* We need to update IV mostly for last bytes/round */
 			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;