Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.15:

  API:

   - Disambiguate EBUSY when queueing a crypto request by adding ENOSPC.
     This change touches code outside the crypto API.
   - Reset settings when an empty string is written to rng_current.

  Algorithms:

   - Add OSCCA SM3 secure hash.

  Drivers:

   - Remove old mv_cesa driver (replaced by marvell/cesa).
   - Enable rfc3686/ecb/cfb/ofb AES in crypto4xx.
   - Add ccm/gcm AES in crypto4xx.
   - Add support for BCM7278 in iproc-rng200.
   - Add hash support on Exynos in s5p-sss.
   - Fix fallback-induced error in vmx.
   - Fix output IV in atmel-aes.
   - Fix empty GCM hash in mediatek.

  Others:

   - Fix DoS potential in lib/mpi.
   - Fix potential out-of-order issues with padata"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
  lib/mpi: call cond_resched() from mpi_powm() loop
  crypto: stm32/hash - Fix return issue on update
  crypto: dh - Remove pointless checks for NULL 'p' and 'g'
  crypto: qat - Clean up error handling in qat_dh_set_secret()
  crypto: dh - Don't permit 'key' or 'g' size longer than 'p'
  crypto: dh - Don't permit 'p' to be 0
  crypto: dh - Fix double free of ctx->p
  hwrng: iproc-rng200 - Add support for BCM7278
  dt-bindings: rng: Document BCM7278 RNG200 compatible
  crypto: chcr - Replace _manual_ swap with swap macro
  crypto: marvell - Add a NULL entry at the end of mv_cesa_plat_id_table[]
  hwrng: virtio - Virtio RNG devices need to be re-registered after suspend/resume
  crypto: atmel - remove empty functions
  crypto: ecdh - remove empty exit()
  MAINTAINERS: update maintainer for qat
  crypto: caam - remove unused param of ctx_map_to_sec4_sg()
  crypto: caam - remove unneeded edesc zeroization
  crypto: atmel-aes - Reset the controller before each use
  crypto: atmel-aes - properly set IV after {en,de}crypt
  hwrng: core - Reset user selected rng by writing "" to rng_current
  ...
commit 37dc79565c
Author: Linus Torvalds

100 changed files with 3456 additions and 4000 deletions:

+ 10 - 42    Documentation/crypto/api-samples.rst
+ 0 - 0      Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+ 3 - 1      Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
+ 2 - 3      MAINTAINERS
+ 1 - 1      arch/arm/configs/dove_defconfig
+ 1 - 1      arch/arm/configs/multi_v5_defconfig
+ 1 - 1      arch/arm/configs/orion5x_defconfig
+ 6 - 4      arch/x86/crypto/aesni-intel_glue.c
+ 6 - 11     arch/x86/crypto/crc32-pclmul_asm.S
+ 11 - 0     crypto/Kconfig
+ 1 - 0      crypto/Makefile
+ 0 - 27     crypto/af_alg.c
+ 3 - 9      crypto/ahash.c
+ 4 - 2      crypto/algapi.c
+ 0 - 1      crypto/algboss.c
+ 4 - 4      crypto/algif_aead.c
+ 14 - 16    crypto/algif_hash.c
+ 4 - 5      crypto/algif_skcipher.c
+ 13 - 0     crypto/api.c
+ 4 - 24     crypto/asymmetric_keys/public_key.c
+ 1 - 3      crypto/cryptd.c
+ 2 - 4      crypto/cts.c
+ 13 - 23    crypto/dh.c
+ 18 - 2     crypto/dh_helper.c
+ 9 - 27     crypto/drbg.c
+ 0 - 6      crypto/ecdh.c
+ 1 - 1      crypto/ecdh_helper.c
+ 18 - 37    crypto/gcm.c
+ 13 - 0     crypto/gf128mul.c
+ 26 - 58    crypto/keywrap.c
+ 8 - 9      crypto/lrw.c
+ 0 - 2      crypto/rmd128.c
+ 0 - 2      crypto/rmd160.c
+ 0 - 2      crypto/rmd256.c
+ 0 - 2      crypto/rmd320.c
+ 4 - 12     crypto/rsa-pkcs1pad.c
+ 210 - 0    crypto/sm3_generic.c
+ 91 - 118   crypto/tcrypt.c
+ 72 - 138   crypto/testmgr.c
+ 67 - 0     crypto/testmgr.h
+ 2 - 6      crypto/xts.c
+ 3 - 3      drivers/char/hw_random/Kconfig
+ 33 - 20    drivers/char/hw_random/core.c
+ 1 - 0      drivers/char/hw_random/iproc-rng200.c
+ 1 - 1      drivers/char/hw_random/pseries-rng.c
+ 0 - 7      drivers/char/hw_random/timeriomem-rng.c
+ 20 - 1     drivers/char/hw_random/virtio-rng.c
+ 21 - 19    drivers/crypto/Kconfig
+ 0 - 1      drivers/crypto/Makefile
+ 1 - 1      drivers/crypto/amcc/Makefile
+ 438 - 74   drivers/crypto/amcc/crypto4xx_alg.c
+ 476 - 355  drivers/crypto/amcc/crypto4xx_core.c
+ 118 - 81   drivers/crypto/amcc/crypto4xx_core.h
+ 3 - 0      drivers/crypto/amcc/crypto4xx_reg_def.h
+ 0 - 85     drivers/crypto/amcc/crypto4xx_sa.c
+ 87 - 12    drivers/crypto/amcc/crypto4xx_sa.h
+ 44 - 36    drivers/crypto/atmel-aes.c
+ 1 - 4      drivers/crypto/atmel-sha.c
+ 1 - 22     drivers/crypto/atmel-tdes.c
+ 52 - 64    drivers/crypto/bcm/cipher.c
+ 1 - 2      drivers/crypto/bcm/cipher.h
+ 7 - 7      drivers/crypto/bcm/util.c
+ 5 - 5      drivers/crypto/caam/caamalg.c
+ 6 - 1      drivers/crypto/caam/caamalg_qi.c
+ 4 - 8      drivers/crypto/caam/caamhash.c
+ 1 - 0      drivers/crypto/caam/compat.h
+ 1 - 1      drivers/crypto/caam/desc.h
+ 1 - 1      drivers/crypto/cavium/nitrox/nitrox_hal.c
+ 4 - 5      drivers/crypto/ccp/ccp-crypto-aes-galois.c
+ 3 - 5      drivers/crypto/ccp/ccp-crypto-main.c
+ 1 - 2      drivers/crypto/ccp/ccp-dev-v5.c
+ 5 - 2      drivers/crypto/ccp/ccp-dev.c
+ 2 - 3      drivers/crypto/ccp/ccp-dmaengine.c
+ 1045 - 753 drivers/crypto/chelsio/chcr_algo.c
+ 15 - 42    drivers/crypto/chelsio/chcr_algo.h
+ 5 - 5      drivers/crypto/chelsio/chcr_core.c
+ 1 - 1      drivers/crypto/chelsio/chcr_core.h
+ 86 - 35    drivers/crypto/chelsio/chcr_crypto.h
+ 2 - 4      drivers/crypto/inside-secure/safexcel_hash.c
+ 0 - 1      drivers/crypto/ixp4xx_crypto.c
+ 13 - 16    drivers/crypto/marvell/cesa.c
+ 14 - 13    drivers/crypto/marvell/cesa.h
+ 233 - 243  drivers/crypto/marvell/cipher.c
+ 1 - 4      drivers/crypto/marvell/tdma.c
+ 12 - 27    drivers/crypto/mediatek/mtk-aes.c
+ 0 - 1216   drivers/crypto/mv_cesa.c
+ 0 - 151    drivers/crypto/mv_cesa.h
+ 4 - 8      drivers/crypto/n2_core.c
+ 1 - 1      drivers/crypto/nx/nx-842-pseries.c
+ 5 - 4      drivers/crypto/nx/nx-aes-gcm.c
+ 1 - 1      drivers/crypto/nx/nx.c
+ 6 - 5      drivers/crypto/omap-aes-gcm.c
+ 5 - 7      drivers/crypto/omap-aes.c
+ 2 - 5      drivers/crypto/omap-des.c
+ 2 - 5      drivers/crypto/omap-sham.c
+ 1 - 1      drivers/crypto/padlock-aes.c
+ 1 - 1      drivers/crypto/padlock-sha.c
+ 0 - 3      drivers/crypto/qat/qat_common/adf_dev_mgr.c
+ 8 - 10     drivers/crypto/qat/qat_common/qat_asym_algs.c
+ 9 - 6      drivers/crypto/qat/qat_common/qat_uclo.c

+ 10 - 42
Documentation/crypto/api-samples.rst

@@ -7,59 +7,27 @@ Code Example For Symmetric Key Cipher Operation
 ::
 
 
-    struct tcrypt_result {
-        struct completion completion;
-        int err;
-    };
-
     /* tie all data structures together */
     struct skcipher_def {
         struct scatterlist sg;
         struct crypto_skcipher *tfm;
         struct skcipher_request *req;
-        struct tcrypt_result result;
+        struct crypto_wait wait;
     };
 
-    /* Callback function */
-    static void test_skcipher_cb(struct crypto_async_request *req, int error)
-    {
-        struct tcrypt_result *result = req->data;
-
-        if (error == -EINPROGRESS)
-            return;
-        result->err = error;
-        complete(&result->completion);
-        pr_info("Encryption finished successfully\n");
-    }
-
     /* Perform cipher operation */
     static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
                          int enc)
     {
-        int rc = 0;
+        int rc;
 
         if (enc)
-            rc = crypto_skcipher_encrypt(sk->req);
+            rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait);
         else
-            rc = crypto_skcipher_decrypt(sk->req);
-
-        switch (rc) {
-        case 0:
-            break;
-        case -EINPROGRESS:
-        case -EBUSY:
-            rc = wait_for_completion_interruptible(
-                &sk->result.completion);
-            if (!rc && !sk->result.err) {
-                reinit_completion(&sk->result.completion);
-                break;
-            }
-        default:
-            pr_info("skcipher encrypt returned with %d result %d\n",
-                rc, sk->result.err);
-            break;
-        }
-        init_completion(&sk->result.completion);
+            rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait);
+
+	if (rc)
+		pr_info("skcipher encrypt returned with result %d\n", rc);
 
         return rc;
     }
@@ -89,8 +57,8 @@ Code Example For Symmetric Key Cipher Operation
         }
 
         skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                          test_skcipher_cb,
-                          &sk.result);
+                          crypto_req_done,
+                          &sk.wait);
 
         /* AES 256 with random key */
         get_random_bytes(&key, 32);
@@ -122,7 +90,7 @@ Code Example For Symmetric Key Cipher Operation
         /* We encrypt one block */
         sg_init_one(&sk.sg, scratchpad, 16);
         skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
-        init_completion(&sk.result.completion);
+        crypto_init_wait(&sk.wait);
 
         /* encrypt data */
         ret = test_skcipher_encdec(&sk, 1);

+ 0 - 0
Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt → Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt


+ 3 - 1
Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt

@@ -1,7 +1,9 @@
 HWRNG support for the iproc-rng200 driver
 
 Required properties:
-- compatible : "brcm,iproc-rng200"
+- compatible : Must be one of:
+	       "brcm,bcm7278-rng200"
+	       "brcm,iproc-rng200"
 - reg : base address and size of control register block
 
 Example:

+ 2 - 3
MAINTAINERS

@@ -5484,7 +5484,7 @@ F:	include/uapi/linux/fb.h
 
 FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
 M:	Horia Geantă <horia.geanta@nxp.com>
-M:	Dan Douglass <dan.douglass@nxp.com>
+M:	Aymen Sghaier <aymen.sghaier@nxp.com>
 L:	linux-crypto@vger.kernel.org
 S:	Maintained
 F:	drivers/crypto/caam/
@@ -11060,7 +11060,6 @@ F:	drivers/mtd/nand/pxa3xx_nand.c
 
 QAT DRIVER
 M:	Giovanni Cabiddu <giovanni.cabiddu@intel.com>
-M:	Salvatore Benedetto <salvatore.benedetto@intel.com>
 L:	qat-linux@intel.com
 S:	Supported
 F:	drivers/crypto/qat/
@@ -11793,7 +11792,7 @@ L:	linux-crypto@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
 S:	Maintained
 F:	drivers/crypto/exynos-rng.c
-F:	Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+F:	Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
 
 SAMSUNG FRAMEBUFFER DRIVER
 M:	Jingoo Han <jingoohan1@gmail.com>

+ 1 - 1
arch/arm/configs/dove_defconfig

@@ -140,6 +140,6 @@ CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_DEFLATE=y
 CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y

+ 1 - 1
arch/arm/configs/multi_v5_defconfig

@@ -279,6 +279,6 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
 CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y

+ 1 - 1
arch/arm/configs/orion5x_defconfig

@@ -163,5 +163,5 @@ CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_T10DIF=y

+ 6 - 4
arch/x86/crypto/aesni-intel_glue.c

@@ -28,6 +28,7 @@
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <crypto/b128ops.h>
+#include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/api.h>
@@ -1067,9 +1068,10 @@ static struct skcipher_alg aesni_skciphers[] = {
 	}
 };
 
+static
 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
 
-struct {
+static struct {
 	const char *algname;
 	const char *drvname;
 	const char *basename;
@@ -1131,7 +1133,7 @@ static struct aead_alg aesni_aead_algs[] = { {
 	.setauthsize		= common_rfc4106_set_authsize,
 	.encrypt		= helper_rfc4106_encrypt,
 	.decrypt		= helper_rfc4106_decrypt,
-	.ivsize			= 8,
+	.ivsize			= GCM_RFC4106_IV_SIZE,
 	.maxauthsize		= 16,
 	.base = {
 		.cra_name		= "__gcm-aes-aesni",
@@ -1149,7 +1151,7 @@ static struct aead_alg aesni_aead_algs[] = { {
 	.setauthsize		= rfc4106_set_authsize,
 	.encrypt		= rfc4106_encrypt,
 	.decrypt		= rfc4106_decrypt,
-	.ivsize			= 8,
+	.ivsize			= GCM_RFC4106_IV_SIZE,
 	.maxauthsize		= 16,
 	.base = {
 		.cra_name		= "rfc4106(gcm(aes))",
@@ -1165,7 +1167,7 @@ static struct aead_alg aesni_aead_algs[] = { {
 	.setauthsize		= generic_gcmaes_set_authsize,
 	.encrypt		= generic_gcmaes_encrypt,
 	.decrypt		= generic_gcmaes_decrypt,
-	.ivsize			= 12,
+	.ivsize			= GCM_AES_IV_SIZE,
 	.maxauthsize		= 16,
 	.base = {
 		.cra_name		= "gcm(aes)",

+ 6 - 11
arch/x86/crypto/crc32-pclmul_asm.S

@@ -41,6 +41,7 @@
 #include <asm/inst.h>
 
 
+.section .rodata
 .align 16
 /*
  * [x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
@@ -111,19 +112,13 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
 	pxor    CONSTANT, %xmm1
 	sub     $0x40, LEN
 	add     $0x40, BUF
-#ifndef __x86_64__
-	/* This is for position independent code(-fPIC) support for 32bit */
-	call    delta
-delta:
-	pop     %ecx
-#endif
 	cmp     $0x40, LEN
 	jb      less_64
 
 #ifdef __x86_64__
 	movdqa .Lconstant_R2R1(%rip), CONSTANT
 #else
-	movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
+	movdqa .Lconstant_R2R1, CONSTANT
 #endif
 
 loop_64:/*  64 bytes Full cache line folding */
@@ -172,7 +167,7 @@ less_64:/*  Folding cache line into 128bit */
 #ifdef __x86_64__
 	movdqa  .Lconstant_R4R3(%rip), CONSTANT
 #else
-	movdqa  .Lconstant_R4R3 - delta(%ecx), CONSTANT
+	movdqa  .Lconstant_R4R3, CONSTANT
 #endif
 	prefetchnta     (BUF)
 
@@ -220,8 +215,8 @@ fold_64:
 	movdqa  .Lconstant_R5(%rip), CONSTANT
 	movdqa  .Lconstant_mask32(%rip), %xmm3
 #else
-	movdqa  .Lconstant_R5 - delta(%ecx), CONSTANT
-	movdqa  .Lconstant_mask32 - delta(%ecx), %xmm3
+	movdqa  .Lconstant_R5, CONSTANT
+	movdqa  .Lconstant_mask32, %xmm3
 #endif
 	psrldq  $0x04, %xmm2
 	pand    %xmm3, %xmm1
@@ -232,7 +227,7 @@ fold_64:
 #ifdef __x86_64__
 	movdqa  .Lconstant_RUpoly(%rip), CONSTANT
 #else
-	movdqa  .Lconstant_RUpoly - delta(%ecx), CONSTANT
+	movdqa  .Lconstant_RUpoly, CONSTANT
 #endif
 	movdqa  %xmm1, %xmm2
 	pand    %xmm3, %xmm1

+ 11 - 0
crypto/Kconfig

@@ -860,6 +860,17 @@ config CRYPTO_SHA3
 	  References:
 	  http://keccak.noekeon.org/
 
+config CRYPTO_SM3
+	tristate "SM3 digest algorithm"
+	select CRYPTO_HASH
+	help
+	  SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3).
+	  It is part of the Chinese Commercial Cryptography suite.
+
+	  References:
+	  http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+	  https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
+
 config CRYPTO_TGR192
 	tristate "Tiger digest algorithms"
 	select CRYPTO_HASH

+ 1 - 0
crypto/Makefile

@@ -71,6 +71,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
 obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
+obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o

+ 0 - 27
crypto/af_alg.c

@@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 }
 EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
 
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
-{
-	switch (err) {
-	case -EINPROGRESS:
-	case -EBUSY:
-		wait_for_completion(&completion->completion);
-		reinit_completion(&completion->completion);
-		err = completion->err;
-		break;
-	};
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
-
-void af_alg_complete(struct crypto_async_request *req, int err)
-{
-	struct af_alg_completion *completion = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	completion->err = err;
-	complete(&completion->completion);
-}
-EXPORT_SYMBOL_GPL(af_alg_complete);
-
 /**
  * af_alg_alloc_tsgl - allocate the TX SGL
  *

+ 3 - 9
crypto/ahash.c

@@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req,
 		return err;
 
 	err = op(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 	ahash_restore_req(req, err);
@@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 	req->base.complete = ahash_def_finup_done2;
 
 	err = crypto_ahash_reqtfm(req)->final(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 out:
@@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req)
 		return err;
 
 	err = tfm->update(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && (ahash_request_flags(req) &
-			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 
 	return ahash_def_finup_finish1(req, err);

+ 4 - 2
crypto/algapi.c

@@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue,
 	int err = -EINPROGRESS;
 
 	if (unlikely(queue->qlen >= queue->max_qlen)) {
-		err = -EBUSY;
-		if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			err = -ENOSPC;
 			goto out;
+		}
+		err = -EBUSY;
 		if (queue->backlog == &queue->list)
 			queue->backlog = &request->list;
 	}
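
The reordered branch above gives crypto_enqueue_request() a three-way contract: -EINPROGRESS (request accepted), -EBUSY (queue full, but the request was placed on the backlog — only possible with CRYPTO_TFM_REQ_MAY_BACKLOG set), and the new -ENOSPC (queue full, nothing queued). A hedged caller-side sketch, modelled on the cryptd change later in this series (the out_drop label is illustrative):

    err = crypto_enqueue_request(&cpu_queue->queue, request);
    if (err == -ENOSPC)
        goto out_drop;    /* hard failure: the request was not queued */

    /* -EINPROGRESS or -EBUSY: the completion callback will fire later,
     * so kick the worker and report the transient status upward. */
    queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);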

+ 0 - 1
crypto/algboss.c

@@ -122,7 +122,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
 		int notnum = 0;
 
 		name = ++p;
-		len = 0;
 
 		for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
 			notnum |= !isdigit(*p);

+ 4 - 4
crypto/algif_aead.c

@@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		/* Synchronous operation */
 		aead_request_set_callback(&areq->cra_u.aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
-					  af_alg_complete, &ctx->completion);
-		err = af_alg_wait_for_completion(ctx->enc ?
+					  crypto_req_done, &ctx->wait);
+		err = crypto_wait_req(ctx->enc ?
 				crypto_aead_encrypt(&areq->cra_u.aead_req) :
 				crypto_aead_decrypt(&areq->cra_u.aead_req),
-						 &ctx->completion);
+				&ctx->wait);
 	}
 
 	/* AIO operation in progress */
@@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 	ctx->merge = 0;
 	ctx->enc = 0;
 	ctx->aead_assoclen = 0;
-	af_alg_init_completion(&ctx->completion);
+	crypto_init_wait(&ctx->wait);
 
 	ask->private = ctx;
 

+ 14 - 16
crypto/algif_hash.c

@@ -26,7 +26,7 @@ struct hash_ctx {
 
 	u8 *result;
 
-	struct af_alg_completion completion;
+	struct crypto_wait wait;
 
 	unsigned int len;
 	bool more;
@@ -88,8 +88,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 		if ((msg->msg_flags & MSG_MORE))
 			hash_free_result(sk, ctx);
 
-		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
-						&ctx->completion);
+		err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
 		if (err)
 			goto unlock;
 	}
@@ -110,8 +109,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 
 		ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
 
-		err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
-						 &ctx->completion);
+		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
+				      &ctx->wait);
 		af_alg_free_sg(&ctx->sgl);
 		if (err)
 			goto unlock;
@@ -129,8 +128,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			goto unlock;
 
 		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
-		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
-						 &ctx->completion);
+		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+				      &ctx->wait);
 	}
 
 unlock:
@@ -171,7 +170,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 	} else {
 		if (!ctx->more) {
 			err = crypto_ahash_init(&ctx->req);
-			err = af_alg_wait_for_completion(err, &ctx->completion);
+			err = crypto_wait_req(err, &ctx->wait);
 			if (err)
 				goto unlock;
 		}
@@ -179,7 +178,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 		err = crypto_ahash_update(&ctx->req);
 	}
 
-	err = af_alg_wait_for_completion(err, &ctx->completion);
+	err = crypto_wait_req(err, &ctx->wait);
 	if (err)
 		goto unlock;
 
@@ -215,17 +214,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
 
 	if (!result && !ctx->more) {
-		err = af_alg_wait_for_completion(
-				crypto_ahash_init(&ctx->req),
-				&ctx->completion);
+		err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+				      &ctx->wait);
 		if (err)
 			goto unlock;
 	}
 
 	if (!result || ctx->more) {
 		ctx->more = 0;
-		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
-						 &ctx->completion);
+		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+				      &ctx->wait);
 		if (err)
 			goto unlock;
 	}
@@ -476,13 +474,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
 	ctx->result = NULL;
 	ctx->len = len;
 	ctx->more = 0;
-	af_alg_init_completion(&ctx->completion);
+	crypto_init_wait(&ctx->wait);
 
 	ask->private = ctx;
 
 	ahash_request_set_tfm(&ctx->req, hash);
 	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				   af_alg_complete, &ctx->completion);
+				   crypto_req_done, &ctx->wait);
 
 	sk->sk_destruct = hash_sock_destruct;
 

+ 4 - 5
crypto/algif_skcipher.c

@@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
 					      CRYPTO_TFM_REQ_MAY_SLEEP |
 					      CRYPTO_TFM_REQ_MAY_BACKLOG,
-					      af_alg_complete,
-					      &ctx->completion);
-		err = af_alg_wait_for_completion(ctx->enc ?
+					      crypto_req_done, &ctx->wait);
+		err = crypto_wait_req(ctx->enc ?
 			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
 			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
-						 &ctx->completion);
+						 &ctx->wait);
 	}
 
 	/* AIO operation in progress */
@@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
-	af_alg_init_completion(&ctx->completion);
+	crypto_init_wait(&ctx->wait);
 
 	ask->private = ctx;
 

+ 13 - 0
crypto/api.c

@@ -24,6 +24,7 @@
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/completion.h>
 #include "internal.h"
 
 LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_has_alg);
 
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+	struct crypto_wait *wait = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	wait->err = err;
+	complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
 MODULE_DESCRIPTION("Cryptographic core API");
 MODULE_LICENSE("GPL");
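
crypto_req_done() is one half of the new synchronous-wait API; the other half, struct crypto_wait and crypto_wait_req(), lives in include/linux/crypto.h. A sketch of their shape, reconstructed from the converted callers in this merge (treat it as approximate, not the authoritative header):

    struct crypto_wait {
        struct completion completion;
        int err;
    };

    static inline void crypto_init_wait(struct crypto_wait *wait)
    {
        init_completion(&wait->completion);
    }

    /*
     * Pass the return value of an async crypto operation through this
     * helper: it sleeps on -EINPROGRESS/-EBUSY until crypto_req_done()
     * completes the request, and passes other errors straight through.
     */
    static inline int crypto_wait_req(int err, struct crypto_wait *wait)
    {
        switch (err) {
        case -EINPROGRESS:
        case -EBUSY:
            wait_for_completion(&wait->completion);
            reinit_completion(&wait->completion);
            err = wait->err;
            break;
        }

        return err;
    }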

+ 4 - 24
crypto/asymmetric_keys/public_key.c

@@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3)
 	public_key_signature_free(payload3);
 }
 
-struct public_key_completion {
-	struct completion completion;
-	int err;
-};
-
-static void public_key_verify_done(struct crypto_async_request *req, int err)
-{
-	struct public_key_completion *compl = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	compl->err = err;
-	complete(&compl->completion);
-}
-
 /*
  * Verify a signature using a public key.
  */
 int public_key_verify_signature(const struct public_key *pkey,
 				const struct public_key_signature *sig)
 {
-	struct public_key_completion compl;
+	struct crypto_wait cwait;
 	struct crypto_akcipher *tfm;
 	struct akcipher_request *req;
 	struct scatterlist sig_sg, digest_sg;
@@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey,
 	sg_init_one(&digest_sg, output, outlen);
 	akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
 				   outlen);
-	init_completion(&compl.completion);
+	crypto_init_wait(&cwait);
 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 				      CRYPTO_TFM_REQ_MAY_SLEEP,
-				      public_key_verify_done, &compl);
+				      crypto_req_done, &cwait);
 
 	/* Perform the verification calculation.  This doesn't actually do the
 	 * verification, but rather calculates the hash expected by the
 	 * signature and returns that to us.
 	 */
-	ret = crypto_akcipher_verify(req);
-	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
-		wait_for_completion(&compl.completion);
-		ret = compl.err;
-	}
+	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
 	if (ret < 0)
 		goto out_free_output;
 

+ 1 - 3
crypto/cryptd.c

@@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	int cpu, err;
 	struct cryptd_cpu_queue *cpu_queue;
 	atomic_t *refcnt;
-	bool may_backlog;
 
 	cpu = get_cpu();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
-	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
 
-	if (err == -EBUSY && !may_backlog)
+	if (err == -ENOSPC)
 		goto out_put_cpu;
 
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

+ 2 - 4
crypto/cts.c

@@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
 		goto out;
 
 	err = cts_cbc_encrypt(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return;
 
 out:
@@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
 		goto out;
 
 	err = cts_cbc_decrypt(req);
-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return;
 
 out:

+ 13 - 23
crypto/dh.c

@@ -21,19 +21,12 @@ struct dh_ctx {
 	MPI xa;
 };
 
-static inline void dh_clear_params(struct dh_ctx *ctx)
+static void dh_clear_ctx(struct dh_ctx *ctx)
 {
 	mpi_free(ctx->p);
 	mpi_free(ctx->g);
-	ctx->p = NULL;
-	ctx->g = NULL;
-}
-
-static void dh_free_ctx(struct dh_ctx *ctx)
-{
-	dh_clear_params(ctx);
 	mpi_free(ctx->xa);
-	ctx->xa = NULL;
+	memset(ctx, 0, sizeof(*ctx));
 }
 
 /*
@@ -60,9 +53,6 @@ static int dh_check_params_length(unsigned int p_len)
 
 static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
 {
-	if (unlikely(!params->p || !params->g))
-		return -EINVAL;
-
 	if (dh_check_params_length(params->p_size << 3))
 		return -EINVAL;
 
@@ -71,10 +61,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
 		return -EINVAL;
 
 	ctx->g = mpi_read_raw_data(params->g, params->g_size);
-	if (!ctx->g) {
-		mpi_free(ctx->p);
+	if (!ctx->g)
 		return -EINVAL;
-	}
 
 	return 0;
 }
@@ -86,21 +74,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
 	struct dh params;
 
 	/* Free the old MPI key if any */
-	dh_free_ctx(ctx);
+	dh_clear_ctx(ctx);
 
 	if (crypto_dh_decode_key(buf, len, &params) < 0)
-		return -EINVAL;
+		goto err_clear_ctx;
 
 	if (dh_set_params(ctx, &params) < 0)
-		return -EINVAL;
+		goto err_clear_ctx;
 
 	ctx->xa = mpi_read_raw_data(params.key, params.key_size);
-	if (!ctx->xa) {
-		dh_clear_params(ctx);
-		return -EINVAL;
-	}
+	if (!ctx->xa)
+		goto err_clear_ctx;
 
 	return 0;
+
+err_clear_ctx:
+	dh_clear_ctx(ctx);
+	return -EINVAL;
 }
 
 static int dh_compute_value(struct kpp_request *req)
@@ -158,7 +148,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm)
 {
 	struct dh_ctx *ctx = dh_get_ctx(tfm);
 
-	dh_free_ctx(ctx);
+	dh_clear_ctx(ctx);
 }
 
 static struct kpp_alg dh = {

+ 18 - 2
crypto/dh_helper.c

@@ -28,12 +28,12 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
 	return src + size;
 }
 
-static inline int dh_data_size(const struct dh *p)
+static inline unsigned int dh_data_size(const struct dh *p)
 {
 	return p->key_size + p->p_size + p->g_size;
 }
 
-int crypto_dh_key_len(const struct dh *p)
+unsigned int crypto_dh_key_len(const struct dh *p)
 {
 	return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
 }
@@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 	if (secret.len != crypto_dh_key_len(params))
 		return -EINVAL;
 
+	/*
+	 * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
+	 * some drivers assume otherwise.
+	 */
+	if (params->key_size > params->p_size ||
+	    params->g_size > params->p_size)
+		return -EINVAL;
+
 	/* Don't allocate memory. Set pointers to data within
 	 * the given buffer
 	 */
@@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 	params->p = (void *)(ptr + params->key_size);
 	params->g = (void *)(ptr + params->key_size + params->p_size);
 
+	/*
+	 * Don't permit 'p' to be 0.  It's not a prime number, and it's subject
+	 * to corner cases such as 'mod 0' being undefined or
+	 * crypto_kpp_maxsize() returning 0.
+	 */
+	if (memchr_inv(params->p, 0, params->p_size) == NULL)
+		return -EINVAL;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
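
For reference, the packed secret decoded here lays out 'key', 'p' and 'g' back to back behind the KPP secret header, so both new checks run before any MPI is allocated. A hedged sketch of parameters the decoder now rejects (buffers and sizes invented for illustration):

    struct dh bad_params = {
        .key      = key_buf,   /* hypothetical buffers */
        .p        = p_buf,
        .g        = g_buf,
        .key_size = 64,        /* key longer than p: rejected ... */
        .p_size   = 32,
        .g_size   = 16,
    };
    /* ... and an all-zero 'p' of any size is rejected as well, since
     * 'mod 0' is undefined and crypto_kpp_maxsize() would report 0. */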

+ 9 - 27
crypto/drbg.c

@@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
 	return 0;
 }
 
-static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
-{
-	struct drbg_state *drbg = req->data;
-
-	if (error == -EINPROGRESS)
-		return;
-	drbg->ctr_async_err = error;
-	complete(&drbg->ctr_completion);
-}
-
 static int drbg_init_sym_kernel(struct drbg_state *drbg)
 {
 	struct crypto_cipher *tfm;
@@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
 		return PTR_ERR(sk_tfm);
 	}
 	drbg->ctr_handle = sk_tfm;
-	init_completion(&drbg->ctr_completion);
+	crypto_init_wait(&drbg->ctr_wait);
 
 	req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
 	if (!req) {
@@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
 		return -ENOMEM;
 	}
 	drbg->ctr_req = req;
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					drbg_skcipher_cb, drbg);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+						CRYPTO_TFM_REQ_MAY_SLEEP,
+					crypto_req_done, &drbg->ctr_wait);
 
 	alignmask = crypto_skcipher_alignmask(sk_tfm);
 	drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
@@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 		/* Output buffer may not be valid for SGL, use scratchpad */
 		skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
 					   cryptlen, drbg->V);
-		ret = crypto_skcipher_encrypt(drbg->ctr_req);
-		switch (ret) {
-		case 0:
-			break;
-		case -EINPROGRESS:
-		case -EBUSY:
-			wait_for_completion(&drbg->ctr_completion);
-			if (!drbg->ctr_async_err) {
-				reinit_completion(&drbg->ctr_completion);
-				break;
-			}
-		default:
+		ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
+					&drbg->ctr_wait);
+		if (ret)
 			goto out;
-		}
-		init_completion(&drbg->ctr_completion);
+
+		crypto_init_wait(&drbg->ctr_wait);
 
 		memcpy(outbuf, drbg->outscratchpad, cryptlen);
 

+ 0 - 6
crypto/ecdh.c

@@ -131,17 +131,11 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
 	return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
 }
 
-static void no_exit_tfm(struct crypto_kpp *tfm)
-{
-	return;
-}
-
 static struct kpp_alg ecdh = {
 	.set_secret = ecdh_set_secret,
 	.generate_public_key = ecdh_compute_value,
 	.compute_shared_secret = ecdh_compute_value,
 	.max_size = ecdh_max_size,
-	.exit = no_exit_tfm,
 	.base = {
 		.cra_name = "ecdh",
 		.cra_driver_name = "ecdh-generic",

+ 1 - 1
crypto/ecdh_helper.c

@@ -28,7 +28,7 @@ static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
 	return src + sz;
 }
 
-int crypto_ecdh_key_len(const struct ecdh *params)
+unsigned int crypto_ecdh_key_len(const struct ecdh *params)
 {
 	return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
 }

+ 18 - 37
crypto/gcm.c

@@ -14,9 +14,9 @@
 #include <crypto/internal/hash.h>
 #include <crypto/null.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/gcm.h>
 #include <crypto/hash.h>
 #include "internal.h"
-#include <linux/completion.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -78,11 +78,6 @@ struct crypto_gcm_req_priv_ctx {
 	} u;
 };
 
-struct crypto_gcm_setkey_result {
-	int err;
-	struct completion completion;
-};
-
 static struct {
 	u8 buf[16];
 	struct scatterlist sg;
@@ -98,17 +93,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
 	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
 }
 
-static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
-	struct crypto_gcm_setkey_result *result = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	result->err = err;
-	complete(&result->completion);
-}
-
 static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 			     unsigned int keylen)
 {
@@ -119,7 +103,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 		be128 hash;
 		u8 iv[16];
 
-		struct crypto_gcm_setkey_result result;
+		struct crypto_wait wait;
 
 		struct scatterlist sg[1];
 		struct skcipher_request req;
@@ -140,21 +124,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	if (!data)
 		return -ENOMEM;
 
-	init_completion(&data->result.completion);
+	crypto_init_wait(&data->wait);
 	sg_init_one(data->sg, &data->hash, sizeof(data->hash));
 	skcipher_request_set_tfm(&data->req, ctr);
 	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
 						  CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      crypto_gcm_setkey_done,
-				      &data->result);
+				      crypto_req_done,
+				      &data->wait);
 	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
 				   sizeof(data->hash), data->iv);
 
-	err = crypto_skcipher_encrypt(&data->req);
-	if (err == -EINPROGRESS || err == -EBUSY) {
-		wait_for_completion(&data->result.completion);
-		err = data->result.err;
-	}
+	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+							&data->wait);
 
 	if (err)
 		goto out;
@@ -197,8 +178,8 @@ static void crypto_gcm_init_common(struct aead_request *req)
 	struct scatterlist *sg;
 
 	memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
-	memcpy(pctx->iv, req->iv, 12);
-	memcpy(pctx->iv + 12, &counter, 4);
+	memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE);
+	memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4);
 
 	sg_init_table(pctx->src, 3);
 	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
@@ -695,7 +676,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 	inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
 				       ctr->base.cra_alignmask;
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
-	inst->alg.ivsize = 12;
+	inst->alg.ivsize = GCM_AES_IV_SIZE;
 	inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
 	inst->alg.maxauthsize = 16;
 	inst->alg.init = crypto_gcm_init_tfm;
@@ -832,20 +813,20 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
 	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
 			   crypto_aead_alignmask(child) + 1);
 
-	scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+	scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0);
 
 	memcpy(iv, ctx->nonce, 4);
 	memcpy(iv + 4, req->iv, 8);
 
 	sg_init_table(rctx->src, 3);
-	sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+	sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
 	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
 	if (sg != rctx->src + 1)
 		sg_chain(rctx->src, 2, sg);
 
 	if (req->src != req->dst) {
 		sg_init_table(rctx->dst, 3);
-		sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+		sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
 		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
 		if (sg != rctx->dst + 1)
 			sg_chain(rctx->dst, 2, sg);
@@ -957,7 +938,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
 	err = -EINVAL;
 
 	/* Underlying IV size must be 12. */
-	if (crypto_aead_alg_ivsize(alg) != 12)
+	if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
 		goto out_drop_alg;
 
 	/* Not a stream cipher? */
@@ -980,7 +961,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
 
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
 
-	inst->alg.ivsize = 8;
+	inst->alg.ivsize = GCM_RFC4106_IV_SIZE;
 	inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 
@@ -1134,7 +1115,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
 		tfm,
 		sizeof(struct crypto_rfc4543_req_ctx) +
 		ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-		align + 12);
+		align + GCM_AES_IV_SIZE);
 
 	return 0;
 
@@ -1199,7 +1180,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
 	err = -EINVAL;
 
 	/* Underlying IV size must be 12. */
-	if (crypto_aead_alg_ivsize(alg) != 12)
+	if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
 		goto out_drop_alg;
 
 	/* Not a stream cipher? */
@@ -1222,7 +1203,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
 
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
 
-	inst->alg.ivsize = 8;
+	inst->alg.ivsize = GCM_RFC4543_IV_SIZE;
 	inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 

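The literals being replaced pin down the constants in the new <crypto/gcm.h> header; from these hunks its core is (a sketch, not the full header):

    #define GCM_AES_IV_SIZE      12  /* gcm(aes): 96-bit IV */
    #define GCM_RFC4106_IV_SIZE   8  /* rfc4106: 64-bit explicit IV */
    #define GCM_RFC4543_IV_SIZE   8  /* rfc4543: 64-bit explicit IV */
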
+ 13 - 0
crypto/gf128mul.c

@@ -156,6 +156,19 @@ static void gf128mul_x8_bbe(be128 *x)
 	x->b = cpu_to_be64((b << 8) ^ _tt);
 }
 
+void gf128mul_x8_ble(le128 *r, const le128 *x)
+{
+	u64 a = le64_to_cpu(x->a);
+	u64 b = le64_to_cpu(x->b);
+
+	/* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
+	u64 _tt = gf128mul_table_be[a >> 56];
+
+	r->a = cpu_to_le64((a << 8) | (b >> 56));
+	r->b = cpu_to_le64((b << 8) ^ _tt);
+}
+EXPORT_SYMBOL(gf128mul_x8_ble);
+
 void gf128mul_lle(be128 *r, const be128 *b)
 {
 	be128 p[8];

+ 26 - 58
crypto/keywrap.c

@@ -93,18 +93,10 @@ struct crypto_kw_ctx {
 
 struct crypto_kw_block {
 #define SEMIBSIZE 8
-	u8 A[SEMIBSIZE];
-	u8 R[SEMIBSIZE];
+	__be64 A;
+	__be64 R;
 };
 
-/* convert 64 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf)
-{
-	__be64 *a = (__be64 *)buf;
-
-	*a = cpu_to_be64(val);
-}
-
 /*
  * Fast forward the SGL to the "end" length minus SEMIBSIZE.
  * The start in the SGL defined by the fast-forward is returned with
@@ -139,17 +131,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
 	struct crypto_blkcipher *tfm = desc->tfm;
 	struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
 	struct crypto_cipher *child = ctx->child;
-
-	unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
-					crypto_cipher_alignmask(child));
-	unsigned int i;
-
-	u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
-	struct crypto_kw_block *block = (struct crypto_kw_block *)
-					PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
-	u64 t = 6 * ((nbytes) >> 3);
+	struct crypto_kw_block block;
 	struct scatterlist *lsrc, *ldst;
+	u64 t = 6 * ((nbytes) >> 3);
+	unsigned int i;
 	int ret = 0;
 
 	/*
@@ -160,7 +145,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
 		return -EINVAL;
 
 	/* Place the IV into block A */
-	memcpy(block->A, desc->info, SEMIBSIZE);
+	memcpy(&block.A, desc->info, SEMIBSIZE);
 
 	/*
 	 * src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -171,32 +156,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
 	ldst = dst;
 
 	for (i = 0; i < 6; i++) {
-		u8 tbe_buffer[SEMIBSIZE + alignmask];
-		/* alignment for the crypto_xor and the _to_be64 operation */
-		u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
-		unsigned int tmp_nbytes = nbytes;
 		struct scatter_walk src_walk, dst_walk;
+		unsigned int tmp_nbytes = nbytes;
 
 		while (tmp_nbytes) {
 			/* move pointer by tmp_nbytes in the SGL */
 			crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
 			/* get the source block */
-			scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+			scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
 					       false);
 
-			/* perform KW operation: get counter as byte string */
-			crypto_kw_cpu_to_be64(t, tbe);
 			/* perform KW operation: modify IV with counter */
-			crypto_xor(block->A, tbe, SEMIBSIZE);
+			block.A ^= cpu_to_be64(t);
 			t--;
 			/* perform KW operation: decrypt block */
-			crypto_cipher_decrypt_one(child, (u8*)block,
-						  (u8*)block);
+			crypto_cipher_decrypt_one(child, (u8*)&block,
+						  (u8*)&block);
 
 			/* move pointer by tmp_nbytes in the SGL */
 			crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
 			/* Copy block->R into place */
-			scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+			scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
 					       true);
 
 			tmp_nbytes -= SEMIBSIZE;
@@ -208,11 +188,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
 	}
 
 	/* Perform authentication check */
-	if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A,
-			  SEMIBSIZE))
+	if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
 		ret = -EBADMSG;
 
-	memzero_explicit(block, sizeof(struct crypto_kw_block));
+	memzero_explicit(&block, sizeof(struct crypto_kw_block));
 
 	return ret;
 }
@@ -224,17 +203,10 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
 	struct crypto_blkcipher *tfm = desc->tfm;
 	struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
 	struct crypto_cipher *child = ctx->child;
-
-	unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
-					crypto_cipher_alignmask(child));
-	unsigned int i;
-
-	u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
-	struct crypto_kw_block *block = (struct crypto_kw_block *)
-					PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
-	u64 t = 1;
+	struct crypto_kw_block block;
 	struct scatterlist *lsrc, *ldst;
+	u64 t = 1;
+	unsigned int i;
 
 	/*
 	 * Require at least 2 semiblocks (note, the 3rd semiblock that is
@@ -249,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
 	 * Place the predefined IV into block A -- for encrypt, the caller
 	 * does not need to provide an IV, but he needs to fetch the final IV.
 	 */
-	memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE);
+	block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);
 
 	/*
 	 * src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -260,30 +232,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
 	ldst = dst;
 
 	for (i = 0; i < 6; i++) {
-		u8 tbe_buffer[SEMIBSIZE + alignmask];
-		u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
-		unsigned int tmp_nbytes = nbytes;
 		struct scatter_walk src_walk, dst_walk;
+		unsigned int tmp_nbytes = nbytes;
 
 		scatterwalk_start(&src_walk, lsrc);
 		scatterwalk_start(&dst_walk, ldst);
 
 		while (tmp_nbytes) {
 			/* get the source block */
-			scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+			scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
 					       false);
 
 			/* perform KW operation: encrypt block */
-			crypto_cipher_encrypt_one(child, (u8 *)block,
-						  (u8 *)block);
-			/* perform KW operation: get counter as byte string */
-			crypto_kw_cpu_to_be64(t, tbe);
+			crypto_cipher_encrypt_one(child, (u8 *)&block,
+						  (u8 *)&block);
 			/* perform KW operation: modify IV with counter */
-			crypto_xor(block->A, tbe, SEMIBSIZE);
+			block.A ^= cpu_to_be64(t);
 			t++;
 
 			/* Copy block->R into place */
-			scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+			scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
 					       true);
 
 			tmp_nbytes -= SEMIBSIZE;
@@ -295,9 +263,9 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
 	}
 
 	/* establish the IV for the caller to pick up */
-	memcpy(desc->info, block->A, SEMIBSIZE);
+	memcpy(desc->info, &block.A, SEMIBSIZE);
 
-	memzero_explicit(block, sizeof(struct crypto_kw_block));
+	memzero_explicit(&block, sizeof(struct crypto_kw_block));
 
 	return 0;
 }
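
The counter handling above folds the old two-step "render t as a big-endian byte string, then crypto_xor()" into a single 64-bit XOR; the two forms are equivalent because XOR operates bytewise regardless of integer byte order. A standalone sketch of the equivalence (variable names illustrative):

    __be64 tbe = cpu_to_be64(t);                        /* old: counter as bytes */
    crypto_xor((u8 *)&block.A, (u8 *)&tbe, SEMIBSIZE);

    block.A ^= cpu_to_be64(t);                          /* new: same result */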

+ 8 - 9
crypto/lrw.c

@@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_encrypt(subreq) ?:
 		      post_crypt(req);
 
-		if (err == -EINPROGRESS ||
-		    (err == -EBUSY &&
-		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return err;
 	}
 
@@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_decrypt(subreq) ?:
 		      post_crypt(req);
 
-		if (err == -EINPROGRESS ||
-		    (err == -EBUSY &&
-		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return err;
 	}
 
@@ -610,9 +606,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 		ecb_name[len - 1] = 0;
 
 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME)
-			return -ENAMETOOLONG;
-	}
+			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
+			err = -ENAMETOOLONG;
+			goto err_drop_spawn;
+		}
+	} else
+		goto err_drop_spawn;
 
 	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = alg->base.cra_priority;

+ 0 - 2
crypto/rmd128.c

@@ -213,8 +213,6 @@ static void rmd128_transform(u32 *state, const __le32 *in)
 	state[2] = state[3] + aa + bbb;
 	state[3] = state[0] + bb + ccc;
 	state[0] = ddd;
-
-	return;
 }
 
 static int rmd128_init(struct shash_desc *desc)

+ 0 - 2
crypto/rmd160.c

@@ -256,8 +256,6 @@ static void rmd160_transform(u32 *state, const __le32 *in)
 	state[3] = state[4] + aa + bbb;
 	state[4] = state[0] + bb + ccc;
 	state[0] = ddd;
-
-	return;
 }
 
 static int rmd160_init(struct shash_desc *desc)

+ 0 - 2
crypto/rmd256.c

@@ -228,8 +228,6 @@ static void rmd256_transform(u32 *state, const __le32 *in)
 	state[5] += bbb;
 	state[6] += ccc;
 	state[7] += ddd;
-
-	return;
 }
 
 static int rmd256_init(struct shash_desc *desc)

+ 0 - 2
crypto/rmd320.c

@@ -275,8 +275,6 @@ static void rmd320_transform(u32 *state, const __le32 *in)
 	state[7] += ccc;
 	state[8] += ddd;
 	state[9] += eee;
-
-	return;
 }
 
 static int rmd320_init(struct shash_desc *desc)

+ 4 - 12
crypto/rsa-pkcs1pad.c

@@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
 				   req->dst, ctx->key_size - 1, req->dst_len);
 
 	err = crypto_akcipher_encrypt(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-			(err != -EBUSY ||
-			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_encrypt_sign_complete(req, err);
 
 	return err;
@@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
 				   ctx->key_size);
 
 	err = crypto_akcipher_decrypt(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-			(err != -EBUSY ||
-			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_decrypt_complete(req, err);
 
 	return err;
@@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
 				   req->dst, ctx->key_size - 1, req->dst_len);
 
 	err = crypto_akcipher_sign(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-			(err != -EBUSY ||
-			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_encrypt_sign_complete(req, err);
 
 	return err;
@@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 				   ctx->key_size);
 
 	err = crypto_akcipher_verify(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-			(err != -EBUSY ||
-			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_verify_complete(req, err);
 
 	return err;

+ 210 - 0
crypto/sm3_generic.c

@@ -0,0 +1,210 @@
+/*
+ * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and
+ * described at https://tools.ietf.org/html/draft-shen-sm3-hash-01
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
+	0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
+	0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
+	0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
+	0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
+};
+EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
+
+static inline u32 p0(u32 x)
+{
+	return x ^ rol32(x, 9) ^ rol32(x, 17);
+}
+
+static inline u32 p1(u32 x)
+{
+	return x ^ rol32(x, 15) ^ rol32(x, 23);
+}
+
+static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c)
+{
+	return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c));
+}
+
+static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g)
+{
+	return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g));
+}
+
+static inline u32 t(unsigned int n)
+{
+	return (n < 16) ? SM3_T1 : SM3_T2;
+}
+
+static void sm3_expand(u32 *t, u32 *w, u32 *wt)
+{
+	int i;
+	unsigned int tmp;
+
+	/* load the input */
+	for (i = 0; i <= 15; i++)
+		w[i] = get_unaligned_be32((__u32 *)t + i);
+
+	for (i = 16; i <= 67; i++) {
+		tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);
+		w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6];
+	}
+
+	for (i = 0; i <= 63; i++)
+		wt[i] = w[i] ^ w[i + 4];
+}
+
+static void sm3_compress(u32 *w, u32 *wt, u32 *m)
+{
+	u32 ss1;
+	u32 ss2;
+	u32 tt1;
+	u32 tt2;
+	u32 a, b, c, d, e, f, g, h;
+	int i;
+
+	a = m[0];
+	b = m[1];
+	c = m[2];
+	d = m[3];
+	e = m[4];
+	f = m[5];
+	g = m[6];
+	h = m[7];
+
+	for (i = 0; i <= 63; i++) {
+
+		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+
+		ss2 = ss1 ^ rol32(a, 12);
+
+		tt1 = ff(i, a, b, c) + d + ss2 + *wt;
+		wt++;
+
+		tt2 = gg(i, e, f, g) + h + ss1 + *w;
+		w++;
+
+		d = c;
+		c = rol32(b, 9);
+		b = a;
+		a = tt1;
+		h = g;
+		g = rol32(f, 19);
+		f = e;
+		e = p0(tt2);
+	}
+
+	m[0] = a ^ m[0];
+	m[1] = b ^ m[1];
+	m[2] = c ^ m[2];
+	m[3] = d ^ m[3];
+	m[4] = e ^ m[4];
+	m[5] = f ^ m[5];
+	m[6] = g ^ m[6];
+	m[7] = h ^ m[7];
+
+	a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;
+}
+
+static void sm3_transform(struct sm3_state *sst, u8 const *src)
+{
+	unsigned int w[68];
+	unsigned int wt[64];
+
+	sm3_expand((u32 *)src, w, wt);
+	sm3_compress(w, wt, sst->state);
+
+	memzero_explicit(w, sizeof(w));
+	memzero_explicit(wt, sizeof(wt));
+}
+
+static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src,
+				    int blocks)
+{
+	while (blocks--) {
+		sm3_transform(sst, src);
+		src += SM3_BLOCK_SIZE;
+	}
+}
+
+int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
+			  unsigned int len)
+{
+	return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+}
+EXPORT_SYMBOL(crypto_sm3_update);
+
+static int sm3_final(struct shash_desc *desc, u8 *out)
+{
+	sm3_base_do_finalize(desc, sm3_generic_block_fn);
+	return sm3_base_finish(desc, out);
+}
+
+int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *hash)
+{
+	sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+	return sm3_final(desc, hash);
+}
+EXPORT_SYMBOL(crypto_sm3_finup);
+
+static struct shash_alg sm3_alg = {
+	.digestsize	=	SM3_DIGEST_SIZE,
+	.init		=	sm3_base_init,
+	.update		=	crypto_sm3_update,
+	.final		=	sm3_final,
+	.finup		=	crypto_sm3_finup,
+	.descsize	=	sizeof(struct sm3_state),
+	.base		=	{
+		.cra_name	 =	"sm3",
+		.cra_driver_name =	"sm3-generic",
+		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	 =	SM3_BLOCK_SIZE,
+		.cra_module	 =	THIS_MODULE,
+	}
+};
+
+static int __init sm3_generic_mod_init(void)
+{
+	return crypto_register_shash(&sm3_alg);
+}
+
+static void __exit sm3_generic_mod_fini(void)
+{
+	crypto_unregister_shash(&sm3_alg);
+}
+
+module_init(sm3_generic_mod_init);
+module_exit(sm3_generic_mod_fini);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
+
+MODULE_ALIAS_CRYPTO("sm3");
+MODULE_ALIAS_CRYPTO("sm3-generic");
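
With the module registered, the new hash is reachable through the regular shash interface (assuming CONFIG_CRYPTO_SM3 is enabled). A minimal in-kernel usage sketch; the wrapper function is hypothetical, only the crypto_* calls are existing API:

    #include <crypto/hash.h>
    #include <crypto/sm3.h>    /* SM3_DIGEST_SIZE */

    /* Hypothetical helper: one-shot SM3 digest of a flat buffer. */
    static int sm3_digest_buf(const u8 *data, unsigned int len,
                              u8 out[SM3_DIGEST_SIZE])
    {
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sm3", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            desc->flags = 0;
            err = crypto_shash_digest(desc, data, len, out);
        }

        crypto_free_shash(tfm);
        return err;
    }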

+ 91 - 118
crypto/tcrypt.c

@@ -70,7 +70,7 @@ static int mode;
 static char *tvmem[TVMEMSIZE];
 
 static char *check[] = {
-	"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
+	"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
 	"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
 	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
 	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
@@ -79,34 +79,11 @@ static char *check[] = {
 	NULL
 };
 
-struct tcrypt_result {
-	struct completion completion;
-	int err;
-};
-
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
-	struct tcrypt_result *res = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	res->err = err;
-	complete(&res->completion);
-}
-
 static inline int do_one_aead_op(struct aead_request *req, int ret)
 {
-	if (ret == -EINPROGRESS || ret == -EBUSY) {
-		struct tcrypt_result *tr = req->base.data;
+	struct crypto_wait *wait = req->base.data;
 
-		ret = wait_for_completion_interruptible(&tr->completion);
-		if (!ret)
-			ret = tr->err;
-		reinit_completion(&tr->completion);
-	}
-
-	return ret;
+	return crypto_wait_req(ret, wait);
 }
 
 static int test_aead_jiffies(struct aead_request *req, int enc,
@@ -248,7 +225,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 	char *axbuf[XBUFSIZE];
 	unsigned int *b_size;
 	unsigned int iv_len;
-	struct tcrypt_result result;
+	struct crypto_wait wait;
 
 	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
 	if (!iv)
@@ -284,7 +261,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 		goto out_notfm;
 	}
 
-	init_completion(&result.completion);
+	crypto_init_wait(&wait);
 	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
 			get_driver_name(crypto_aead, tfm), e);
 
@@ -296,7 +273,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 	}
 
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  tcrypt_complete, &result);
+				  crypto_req_done, &wait);
 
 	i = 0;
 	do {
@@ -340,7 +317,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 			}
 
 			sg_init_aead(sg, xbuf,
-				    *b_size + (enc ? authsize : 0));
+				    *b_size + (enc ? 0 : authsize));
 
 			sg_init_aead(sgout, xoutbuf,
 				    *b_size + (enc ? authsize : 0));
@@ -348,7 +325,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 			sg_set_buf(&sg[0], assoc, aad_size);
 			sg_set_buf(&sgout[0], assoc, aad_size);
 
-			aead_request_set_crypt(req, sg, sgout, *b_size, iv);
+			aead_request_set_crypt(req, sg, sgout,
+					       *b_size + (enc ? 0 : authsize),
+					       iv);
 			aead_request_set_ad(req, aad_size);
 
 			if (secs)
@@ -381,7 +360,6 @@ out_noaxbuf:
 	testmgr_free_buf(xbuf);
 out_noxbuf:
 	kfree(iv);
-	return;
 }
 
 static void test_hash_sg_init(struct scatterlist *sg)
@@ -397,21 +375,16 @@ static void test_hash_sg_init(struct scatterlist *sg)
 
 static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 {
-	if (ret == -EINPROGRESS || ret == -EBUSY) {
-		struct tcrypt_result *tr = req->base.data;
+	struct crypto_wait *wait = req->base.data;
 
-		wait_for_completion(&tr->completion);
-		reinit_completion(&tr->completion);
-		ret = tr->err;
-	}
-	return ret;
+	return crypto_wait_req(ret, wait);
 }
 
 struct test_mb_ahash_data {
 	struct scatterlist sg[TVMEMSIZE];
 	char result[64];
 	struct ahash_request *req;
-	struct tcrypt_result tresult;
+	struct crypto_wait wait;
 	char *xbuf[XBUFSIZE];
 };
 
@@ -440,7 +413,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
 		if (testmgr_alloc_buf(data[i].xbuf))
 			goto out;
 
-		init_completion(&data[i].tresult.completion);
+		crypto_init_wait(&data[i].wait);
 
 		data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
 		if (!data[i].req) {
@@ -449,8 +422,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
 			goto out;
 		}
 
-		ahash_request_set_callback(data[i].req, 0,
-					   tcrypt_complete, &data[i].tresult);
+		ahash_request_set_callback(data[i].req, 0, crypto_req_done,
+					   &data[i].wait);
 		test_hash_sg_init(data[i].sg);
 	}
 
@@ -492,16 +465,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
 			if (ret)
 				break;
 
-			complete(&data[k].tresult.completion);
-			data[k].tresult.err = 0;
+			crypto_req_done(&data[k].req->base, 0);
 		}
 
 		for (j = 0; j < k; j++) {
-			struct tcrypt_result *tr = &data[j].tresult;
+			struct crypto_wait *wait = &data[j].wait;
+			int wait_ret;
 
-			wait_for_completion(&tr->completion);
-			if (tr->err)
-				ret = tr->err;
+			wait_ret = crypto_wait_req(-EINPROGRESS, wait);
+			if (wait_ret)
+				ret = wait_ret;
 		}
 
 		end = get_cycles();
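The multibuffer loop needs one extra trick: a request that finished synchronously never fires its callback, so crypto_req_done() is called by hand to complete its wait. The reap loop can then treat every request as asynchronous by feeding -EINPROGRESS into crypto_wait_req(), which unconditionally sleeps on the completion and returns the recorded error. In outline:

	if (ret == 0)					/* finished synchronously */
		crypto_req_done(&data[k].req->base, 0);	/* fake the callback */

	/* later, for each request issued: */
	wait_ret = crypto_wait_req(-EINPROGRESS, &data[j].wait);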
@@ -679,7 +652,7 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
 				    struct hash_speed *speed, unsigned mask)
 {
 	struct scatterlist sg[TVMEMSIZE];
-	struct tcrypt_result tresult;
+	struct crypto_wait wait;
 	struct ahash_request *req;
 	struct crypto_ahash *tfm;
 	char *output;
@@ -708,9 +681,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
 		goto out;
 	}
 
-	init_completion(&tresult.completion);
+	crypto_init_wait(&wait);
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				   tcrypt_complete, &tresult);
+				   crypto_req_done, &wait);
 
 	output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
 	if (!output)
@@ -765,15 +738,9 @@ static void test_hash_speed(const char *algo, unsigned int secs,
 
 static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
 {
-	if (ret == -EINPROGRESS || ret == -EBUSY) {
-		struct tcrypt_result *tr = req->base.data;
-
-		wait_for_completion(&tr->completion);
-		reinit_completion(&tr->completion);
-		ret = tr->err;
-	}
+	struct crypto_wait *wait = req->base.data;
 
-	return ret;
+	return crypto_wait_req(ret, wait);
 }
 
 static int test_acipher_jiffies(struct skcipher_request *req, int enc,
@@ -853,7 +820,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 				unsigned int tcount, u8 *keysize, bool async)
 {
 	unsigned int ret, i, j, k, iv_len;
-	struct tcrypt_result tresult;
+	struct crypto_wait wait;
 	const char *key;
 	char iv[128];
 	struct skcipher_request *req;
@@ -866,7 +833,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 	else
 		e = "decryption";
 
-	init_completion(&tresult.completion);
+	crypto_init_wait(&wait);
 
 	tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
 
@@ -887,7 +854,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 	}
 
 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      tcrypt_complete, &tresult);
+				      crypto_req_done, &wait);
 
 	i = 0;
 	do {
@@ -1269,6 +1236,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		ret += tcrypt_test("sha3-512");
 		break;
 
+	case 52:
+		ret += tcrypt_test("sm3");
+		break;
+
 	case 100:
 		ret += tcrypt_test("hmac(md5)");
 		break;
@@ -1603,115 +1574,116 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 				  speed_template_32);
 		break;
 
-
 	case 300:
 		if (alg) {
 			test_hash_speed(alg, sec, generic_hash_speed_template);
 			break;
 		}
-
 		/* fall through */
-
 	case 301:
 		test_hash_speed("md4", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 302:
 		test_hash_speed("md5", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 303:
 		test_hash_speed("sha1", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 304:
 		test_hash_speed("sha256", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 305:
 		test_hash_speed("sha384", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 306:
 		test_hash_speed("sha512", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 307:
 		test_hash_speed("wp256", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 308:
 		test_hash_speed("wp384", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 309:
 		test_hash_speed("wp512", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 310:
 		test_hash_speed("tgr128", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 311:
 		test_hash_speed("tgr160", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 312:
 		test_hash_speed("tgr192", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 313:
 		test_hash_speed("sha224", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 314:
 		test_hash_speed("rmd128", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 315:
 		test_hash_speed("rmd160", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 316:
 		test_hash_speed("rmd256", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 317:
 		test_hash_speed("rmd320", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 318:
 		test_hash_speed("ghash-generic", sec, hash_speed_template_16);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 319:
 		test_hash_speed("crc32c", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 320:
 		test_hash_speed("crct10dif", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 321:
 		test_hash_speed("poly1305", sec, poly1305_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 322:
 		test_hash_speed("sha3-224", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 323:
 		test_hash_speed("sha3-256", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 324:
 		test_hash_speed("sha3-384", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
 	case 325:
 		test_hash_speed("sha3-512", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
-
+		/* fall through */
+	case 326:
+		test_hash_speed("sm3", sec, generic_hash_speed_template);
+		if (mode > 300 && mode < 400) break;
+		/* fall through */
 	case 399:
 		break;
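The blank separators in this switch become explicit /* fall through */ annotations (the comment form that -Wimplicit-fallthrough recognizes). The cascade is intentional: a group mode such as 300 or 400 runs every test in its range, while a specific mode runs one case and then breaks, e.g.:

	case 304:
		test_hash_speed("sha256", sec, generic_hash_speed_template);
		if (mode > 300 && mode < 400) break;	/* single test requested */
		/* fall through */			/* group run continues */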
 
@@ -1720,106 +1692,107 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 			test_ahash_speed(alg, sec, generic_hash_speed_template);
 			break;
 		}
-
 		/* fall through */
-
 	case 401:
 		test_ahash_speed("md4", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 402:
 		test_ahash_speed("md5", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 403:
 		test_ahash_speed("sha1", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 404:
 		test_ahash_speed("sha256", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 405:
 		test_ahash_speed("sha384", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 406:
 		test_ahash_speed("sha512", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 407:
 		test_ahash_speed("wp256", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 408:
 		test_ahash_speed("wp384", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 409:
 		test_ahash_speed("wp512", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 410:
 		test_ahash_speed("tgr128", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 411:
 		test_ahash_speed("tgr160", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 412:
 		test_ahash_speed("tgr192", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 413:
 		test_ahash_speed("sha224", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 414:
 		test_ahash_speed("rmd128", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 415:
 		test_ahash_speed("rmd160", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 416:
 		test_ahash_speed("rmd256", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 417:
 		test_ahash_speed("rmd320", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 418:
 		test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 419:
 		test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 420:
 		test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
-
+		/* fall through */
 	case 421:
 		test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 422:
 		test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 423:
 		test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
 	case 424:
 		test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
-
+		/* fall through */
+	case 425:
+		test_mb_ahash_speed("sm3", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+		/* fall through */
 	case 499:
 		break;
 

crypto/testmgr.c  (+72, -138)

@@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
 #define ENCRYPT 1
 #define DECRYPT 0
 
-struct tcrypt_result {
-	struct completion completion;
-	int err;
-};
-
 struct aead_test_suite {
 	struct {
 		const struct aead_testvec *vecs;
@@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len)
 			buf, len, false);
 }
 
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
-	struct tcrypt_result *res = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	res->err = err;
-	complete(&res->completion);
-}
-
 static int testmgr_alloc_buf(char *buf[XBUFSIZE])
 {
 	int i;
@@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
 		free_page((unsigned long)buf[i]);
 }
 
-static int wait_async_op(struct tcrypt_result *tr, int ret)
-{
-	if (ret == -EINPROGRESS || ret == -EBUSY) {
-		wait_for_completion(&tr->completion);
-		reinit_completion(&tr->completion);
-		ret = tr->err;
-	}
-	return ret;
-}
-
 static int ahash_partial_update(struct ahash_request **preq,
 	struct crypto_ahash *tfm, const struct hash_testvec *template,
 	void *hash_buff, int k, int temp, struct scatterlist *sg,
-	const char *algo, char *result, struct tcrypt_result *tresult)
+	const char *algo, char *result, struct crypto_wait *wait)
 {
 	char *state;
 	struct ahash_request *req;
@@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq,
 	}
 	ahash_request_set_callback(req,
 		CRYPTO_TFM_REQ_MAY_BACKLOG,
-		tcrypt_complete, tresult);
+		crypto_req_done, wait);
 
 	memcpy(hash_buff, template->plaintext + temp,
 		template->tap[k]);
@@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq,
 		pr_err("alg: hash: Failed to import() for %s\n", algo);
 		goto out;
 	}
-	ret = wait_async_op(tresult, crypto_ahash_update(req));
+	ret = crypto_wait_req(crypto_ahash_update(req), wait);
 	if (ret)
 		goto out;
 	*preq = req;
@@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm,
 	char *result;
 	char *key;
 	struct ahash_request *req;
-	struct tcrypt_result tresult;
+	struct crypto_wait wait;
 	void *hash_buff;
 	char *xbuf[XBUFSIZE];
 	int ret = -ENOMEM;
@@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm,
 	if (testmgr_alloc_buf(xbuf))
 		goto out_nobuf;
 
-	init_completion(&tresult.completion);
+	crypto_init_wait(&wait);
 
 	req = ahash_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
@@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm,
 		goto out_noreq;
 	}
 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				   tcrypt_complete, &tresult);
+				   crypto_req_done, &wait);
 
 	j = 0;
 	for (i = 0; i < tcount; i++) {
@@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm,
 
 		ahash_request_set_crypt(req, sg, result, template[i].psize);
 		if (use_digest) {
-			ret = wait_async_op(&tresult, crypto_ahash_digest(req));
+			ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 			if (ret) {
 				pr_err("alg: hash: digest failed on test %d "
 				       "for %s: ret=%d\n", j, algo, -ret);
 				goto out;
 			}
 		} else {
-			ret = wait_async_op(&tresult, crypto_ahash_init(req));
+			ret = crypto_wait_req(crypto_ahash_init(req), &wait);
 			if (ret) {
 				pr_err("alg: hash: init failed on test %d "
 				       "for %s: ret=%d\n", j, algo, -ret);
 				goto out;
 			}
-			ret = wait_async_op(&tresult, crypto_ahash_update(req));
+			ret = crypto_wait_req(crypto_ahash_update(req), &wait);
 			if (ret) {
 				pr_err("alg: hash: update failed on test %d "
 				       "for %s: ret=%d\n", j, algo, -ret);
 				goto out;
 			}
-			ret = wait_async_op(&tresult, crypto_ahash_final(req));
+			ret = crypto_wait_req(crypto_ahash_final(req), &wait);
 			if (ret) {
 				pr_err("alg: hash: final failed on test %d "
 				       "for %s: ret=%d\n", j, algo, -ret);
@@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm,
 		}
 
 		ahash_request_set_crypt(req, sg, result, template[i].psize);
-		ret = crypto_ahash_digest(req);
-		switch (ret) {
-		case 0:
-			break;
-		case -EINPROGRESS:
-		case -EBUSY:
-			wait_for_completion(&tresult.completion);
-			reinit_completion(&tresult.completion);
-			ret = tresult.err;
-			if (!ret)
-				break;
-			/* fall through */
-		default:
-			printk(KERN_ERR "alg: hash: digest failed "
-			       "on chunking test %d for %s: "
-			       "ret=%d\n", j, algo, -ret);
+		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+		if (ret) {
+			pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n",
+			       j, algo, -ret);
 			goto out;
 		}
 
@@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm,
 		}
 
 		ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
-		ret = wait_async_op(&tresult, crypto_ahash_init(req));
+		ret = crypto_wait_req(crypto_ahash_init(req), &wait);
 		if (ret) {
 			pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
 				j, algo, -ret);
 			goto out;
 		}
-		ret = wait_async_op(&tresult, crypto_ahash_update(req));
+		ret = crypto_wait_req(crypto_ahash_update(req), &wait);
 		if (ret) {
 			pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
 				j, algo, -ret);
@@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm,
 		for (k = 1; k < template[i].np; k++) {
 			ret = ahash_partial_update(&req, tfm, &template[i],
 				hash_buff, k, temp, &sg[0], algo, result,
-				&tresult);
+				&wait);
 			if (ret) {
 				pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
 					j, algo, -ret);
@@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm,
 			}
 			temp += template[i].tap[k];
 		}
-		ret = wait_async_op(&tresult, crypto_ahash_final(req));
+		ret = crypto_wait_req(crypto_ahash_final(req), &wait);
 		if (ret) {
 			pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
 				j, algo, -ret);
@@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 	struct scatterlist *sg;
 	struct scatterlist *sgout;
 	const char *e, *d;
-	struct tcrypt_result result;
+	struct crypto_wait wait;
 	unsigned int authsize, iv_len;
 	void *input;
 	void *output;
@@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 	else
 		e = "decryption";
 
-	init_completion(&result.completion);
+	crypto_init_wait(&wait);
 
 	req = aead_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
@@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 	}
 
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  tcrypt_complete, &result);
+				  crypto_req_done, &wait);
 
 	iv_len = crypto_aead_ivsize(tfm);
 
@@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 
 		aead_request_set_ad(req, template[i].alen);
 
-		ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+		ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+				      : crypto_aead_decrypt(req), &wait);
 
 		switch (ret) {
 		case 0:
@@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 				goto out;
 			}
 			break;
-		case -EINPROGRESS:
-		case -EBUSY:
-			wait_for_completion(&result.completion);
-			reinit_completion(&result.completion);
-			ret = result.err;
-			if (!ret)
-				break;
 		case -EBADMSG:
 			if (template[i].novrfy)
 				/* verification failure was expected */
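One subtlety survives the conversion: crypto_wait_req() absorbs -EINPROGRESS and -EBUSY, but -EBADMSG still needs its own case afterwards, because a decryption that fails tag verification is the expected outcome for vectors flagged novrfy. Condensed, the resulting shape is roughly:

	ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
			      : crypto_aead_decrypt(req), &wait);
	switch (ret) {
	case 0:
		break;
	case -EBADMSG:
		if (template[i].novrfy)
			continue;	/* verification failure was expected */
		/* fall through */
	default:
		goto out;		/* report the error */
	}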
@@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 
 		aead_request_set_ad(req, template[i].alen);
 
-		ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+		ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+				      : crypto_aead_decrypt(req), &wait);
 
 		switch (ret) {
 		case 0:
@@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 				goto out;
 			}
 			break;
-		case -EINPROGRESS:
-		case -EBUSY:
-			wait_for_completion(&result.completion);
-			reinit_completion(&result.completion);
-			ret = result.err;
-			if (!ret)
-				break;
 		case -EBADMSG:
 			if (template[i].novrfy)
 				/* verification failure was expected */
@@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 	struct scatterlist sg[8];
 	struct scatterlist sgout[8];
 	const char *e, *d;
-	struct tcrypt_result result;
+	struct crypto_wait wait;
 	void *data;
 	char iv[MAX_IVLEN];
 	char *xbuf[XBUFSIZE];
@@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 	else
 		e = "decryption";
 
-	init_completion(&result.completion);
+	crypto_init_wait(&wait);
 
 	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
@@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 	}
 
 	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      tcrypt_complete, &result);
+				      crypto_req_done, &wait);
 
 	j = 0;
 	for (i = 0; i < tcount; i++) {
@@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 
 		skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
 					   template[i].ilen, iv);
-		ret = enc ? crypto_skcipher_encrypt(req) :
-			    crypto_skcipher_decrypt(req);
+		ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+				      crypto_skcipher_decrypt(req), &wait);
 
-		switch (ret) {
-		case 0:
-			break;
-		case -EINPROGRESS:
-		case -EBUSY:
-			wait_for_completion(&result.completion);
-			reinit_completion(&result.completion);
-			ret = result.err;
-			if (!ret)
-				break;
-			/* fall through */
-		default:
+		if (ret) {
 			pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
 			       d, e, j, algo, -ret);
 			goto out;
@@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 		skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
 					   template[i].ilen, iv);
 
-		ret = enc ? crypto_skcipher_encrypt(req) :
-			    crypto_skcipher_decrypt(req);
+		ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+				      crypto_skcipher_decrypt(req), &wait);
 
-		switch (ret) {
-		case 0:
-			break;
-		case -EINPROGRESS:
-		case -EBUSY:
-			wait_for_completion(&result.completion);
-			reinit_completion(&result.completion);
-			ret = result.err;
-			if (!ret)
-				break;
-			/* fall through */
-		default:
+		if (ret) {
 			pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
 			       d, e, j, algo, -ret);
 			goto out;
@@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm,
 	int ret;
 	struct scatterlist src, dst;
 	struct acomp_req *req;
-	struct tcrypt_result result;
+	struct crypto_wait wait;
 
 	output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
 	if (!output)
@@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm,
 		}
 
 		memset(output, 0, dlen);
-		init_completion(&result.completion);
+		crypto_init_wait(&wait);
 		sg_init_one(&src, input_vec, ilen);
 		sg_init_one(&dst, output, dlen);
 
@@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm,
 
 		acomp_request_set_params(req, &src, &dst, ilen, dlen);
 		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					   tcrypt_complete, &result);
+					   crypto_req_done, &wait);
 
-		ret = wait_async_op(&result, crypto_acomp_compress(req));
+		ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
 		if (ret) {
 			pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
 			       i + 1, algo, -ret);
@@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm,
 		dlen = COMP_BUF_SIZE;
 		sg_init_one(&src, output, ilen);
 		sg_init_one(&dst, decomp_out, dlen);
-		init_completion(&result.completion);
+		crypto_init_wait(&wait);
 		acomp_request_set_params(req, &src, &dst, ilen, dlen);
 
-		ret = wait_async_op(&result, crypto_acomp_decompress(req));
+		ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
 		if (ret) {
 			pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
 			       i + 1, algo, -ret);
@@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm,
 		}
 
 		memset(output, 0, dlen);
-		init_completion(&result.completion);
+		crypto_init_wait(&wait);
 		sg_init_one(&src, input_vec, ilen);
 		sg_init_one(&dst, output, dlen);
 
@@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm,
 
 		acomp_request_set_params(req, &src, &dst, ilen, dlen);
 		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					   tcrypt_complete, &result);
+					   crypto_req_done, &wait);
 
-		ret = wait_async_op(&result, crypto_acomp_decompress(req));
+		ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
 		if (ret) {
 			pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
 			       i + 1, algo, -ret);
@@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
 	void *a_public = NULL;
 	void *a_ss = NULL;
 	void *shared_secret = NULL;
-	struct tcrypt_result result;
+	struct crypto_wait wait;
 	unsigned int out_len_max;
 	int err = -ENOMEM;
 	struct scatterlist src, dst;
@@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
 	if (!req)
 		return err;
 
-	init_completion(&result.completion);
+	crypto_init_wait(&wait);
 
 	err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
 	if (err < 0)
@@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
 	sg_init_one(&dst, output_buf, out_len_max);
 	kpp_request_set_output(req, &dst, out_len_max);
 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				 tcrypt_complete, &result);
+				 crypto_req_done, &wait);
 
 	/* Compute party A's public key */
-	err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
+	err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
 	if (err) {
 		pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
 		       alg, err);
@@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
 	kpp_request_set_input(req, &src, vec->b_public_size);
 	kpp_request_set_output(req, &dst, out_len_max);
 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				 tcrypt_complete, &result);
-	err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
+				 crypto_req_done, &wait);
+	err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
 	if (err) {
 		pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
 		       alg, err);
@@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
 		kpp_request_set_input(req, &src, vec->expected_a_public_size);
 		kpp_request_set_output(req, &dst, out_len_max);
 		kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					 tcrypt_complete, &result);
-		err = wait_async_op(&result,
-				    crypto_kpp_compute_shared_secret(req));
+					 crypto_req_done, &wait);
+		err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
+				      &wait);
 		if (err) {
 			pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
 			       alg, err);
@@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
 	struct akcipher_request *req;
 	void *outbuf_enc = NULL;
 	void *outbuf_dec = NULL;
-	struct tcrypt_result result;
+	struct crypto_wait wait;
 	unsigned int out_len_max, out_len = 0;
 	int err = -ENOMEM;
 	struct scatterlist src, dst, src_tab[2];
@@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
 	if (!req)
 		goto free_xbuf;
 
-	init_completion(&result.completion);
+	crypto_init_wait(&wait);
 
 	if (vecs->public_key_vec)
 		err = crypto_akcipher_set_pub_key(tfm, vecs->key,
@@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
 	akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
 				   out_len_max);
 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      tcrypt_complete, &result);
+				      crypto_req_done, &wait);
 
-	err = wait_async_op(&result, vecs->siggen_sigver_test ?
-				     /* Run asymmetric signature generation */
-				     crypto_akcipher_sign(req) :
-				     /* Run asymmetric encrypt */
-				     crypto_akcipher_encrypt(req));
+	err = crypto_wait_req(vecs->siggen_sigver_test ?
+			      /* Run asymmetric signature generation */
+			      crypto_akcipher_sign(req) :
+			      /* Run asymmetric encrypt */
+			      crypto_akcipher_encrypt(req), &wait);
 	if (err) {
 		pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
 		goto free_all;
@@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
 
 	sg_init_one(&src, xbuf[0], vecs->c_size);
 	sg_init_one(&dst, outbuf_dec, out_len_max);
-	init_completion(&result.completion);
+	crypto_init_wait(&wait);
 	akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
 
-	err = wait_async_op(&result, vecs->siggen_sigver_test ?
-				     /* Run asymmetric signature verification */
-				     crypto_akcipher_verify(req) :
-				     /* Run asymmetric decrypt */
-				     crypto_akcipher_decrypt(req));
+	err = crypto_wait_req(vecs->siggen_sigver_test ?
+			      /* Run asymmetric signature verification */
+			      crypto_akcipher_verify(req) :
+			      /* Run asymmetric decrypt */
+			      crypto_akcipher_decrypt(req), &wait);
 	if (err) {
 		pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
 		goto free_all;
@@ -3499,6 +3427,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.suite = {
 			.hash = __VECS(sha512_tv_template)
 		}
+	}, {
+		.alg = "sm3",
+		.test = alg_test_hash,
+		.suite = {
+			.hash = __VECS(sm3_tv_template)
+		}
 	}, {
 		.alg = "tgr128",
 		.test = alg_test_hash,

crypto/testmgr.h  (+67, -0)

@@ -1497,6 +1497,73 @@ static const struct hash_testvec crct10dif_tv_template[] = {
 	}
 };
 
+/* Example vectors below taken from
+ * http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+ *
+ * The rest taken from
+ * https://github.com/adamws/oscca-sm3
+ */
+static const struct hash_testvec sm3_tv_template[] = {
+	{
+		.plaintext = "",
+		.psize = 0,
+		.digest = (u8 *)(u8 []) {
+			0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
+			0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
+			0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
+			0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B }
+	}, {
+		.plaintext = "a",
+		.psize = 1,
+		.digest = (u8 *)(u8 []) {
+			0x62, 0x34, 0x76, 0xAC, 0x18, 0xF6, 0x5A, 0x29,
+			0x09, 0xE4, 0x3C, 0x7F, 0xEC, 0x61, 0xB4, 0x9C,
+			0x7E, 0x76, 0x4A, 0x91, 0xA1, 0x8C, 0xCB, 0x82,
+			0xF1, 0x91, 0x7A, 0x29, 0xC8, 0x6C, 0x5E, 0x88 }
+	}, {
+		/* A.1. Example 1 */
+		.plaintext = "abc",
+		.psize = 3,
+		.digest = (u8 *)(u8 []) {
+			0x66, 0xC7, 0xF0, 0xF4, 0x62, 0xEE, 0xED, 0xD9,
+			0xD1, 0xF2, 0xD4, 0x6B, 0xDC, 0x10, 0xE4, 0xE2,
+			0x41, 0x67, 0xC4, 0x87, 0x5C, 0xF2, 0xF7, 0xA2,
+			0x29, 0x7D, 0xA0, 0x2B, 0x8F, 0x4B, 0xA8, 0xE0 }
+	}, {
+		.plaintext = "abcdefghijklmnopqrstuvwxyz",
+		.psize = 26,
+		.digest = (u8 *)(u8 []) {
+			0xB8, 0x0F, 0xE9, 0x7A, 0x4D, 0xA2, 0x4A, 0xFC,
+			0x27, 0x75, 0x64, 0xF6, 0x6A, 0x35, 0x9E, 0xF4,
+			0x40, 0x46, 0x2A, 0xD2, 0x8D, 0xCC, 0x6D, 0x63,
+			0xAD, 0xB2, 0x4D, 0x5C, 0x20, 0xA6, 0x15, 0x95 }
+	}, {
+		/* A.1. Example 2 */
+		.plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab"
+			     "cdabcdabcdabcdabcd",
+		.psize = 64,
+		.digest = (u8 *)(u8 []) {
+			0xDE, 0xBE, 0x9F, 0xF9, 0x22, 0x75, 0xB8, 0xA1,
+			0x38, 0x60, 0x48, 0x89, 0xC1, 0x8E, 0x5A, 0x4D,
+			0x6F, 0xDB, 0x70, 0xE5, 0x38, 0x7E, 0x57, 0x65,
+			0x29, 0x3D, 0xCB, 0xA3, 0x9C, 0x0C, 0x57, 0x32 }
+	}, {
+		.plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+			     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+			     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+			     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+			     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+			     "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+			     "abcdabcdabcdabcdabcdabcdabcdabcd",
+		.psize = 256,
+		.digest = (u8 *)(u8 []) {
+			0xB9, 0x65, 0x76, 0x4C, 0x8B, 0xEB, 0xB0, 0x91,
+			0xC7, 0x60, 0x2B, 0x74, 0xAF, 0xD3, 0x4E, 0xEF,
+			0xB5, 0x31, 0xDC, 0xCB, 0x4E, 0x00, 0x76, 0xD9,
+			0xB7, 0xCD, 0x81, 0x31, 0x99, 0xB4, 0x59, 0x71 }
+	}
+};
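The digest initializers use C99 compound literals: (u8 []){ ... } creates an unnamed byte array (with static storage duration at file scope) whose address then fills the pointer field. A stand-alone equivalent of the idiom:

	/* p points at an unnamed, statically allocated 4-byte array */
	static const u8 *p = (const u8 []) { 0x66, 0xC7, 0xF0, 0xF4 };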
+
 /*
  * SHA1 test vectors from FIPS PUB 180-1
  * Long vector from CAVS 5.0

crypto/xts.c  (+2, -6)

@@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_encrypt(subreq) ?:
 		      post_crypt(req);
 
-		if (err == -EINPROGRESS ||
-		    (err == -EBUSY &&
-		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return err;
 	}
 
@@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_decrypt(subreq) ?:
 		      post_crypt(req);
 
-		if (err == -EINPROGRESS ||
-		    (err == -EBUSY &&
-		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return err;
 	}
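Both hunks lean on the reworked queueing convention: for a request that sets CRYPTO_TFM_REQ_MAY_BACKLOG, -EBUSY now always means the request was accepted onto the backlog and a completion callback will follow, while a full queue with no backlog requested surfaces as -ENOSPC instead, so the extra flag test is dead weight. From a caller's point of view (a sketch of the convention, not code from this patch):

	err = crypto_skcipher_encrypt(subreq);
	switch (err) {
	case -EINPROGRESS:	/* accepted, running asynchronously */
	case -EBUSY:		/* accepted onto the backlog queue */
		return err;	/* the completion callback finishes the job */
	case -ENOSPC:		/* queue full and MAY_BACKLOG not set */
	default:
		break;		/* synchronous result or hard error */
	}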
 

drivers/char/hw_random/Kconfig  (+3, -3)

@@ -100,12 +100,12 @@ config HW_RANDOM_BCM2835
 	  If unsure, say Y.
 
 config HW_RANDOM_IPROC_RNG200
-	tristate "Broadcom iProc RNG200 support"
-	depends on ARCH_BCM_IPROC
+	tristate "Broadcom iProc/STB RNG200 support"
+	depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the RNG200
-	  hardware found on the Broadcom iProc SoCs.
+	  hardware found on the Broadcom iProc and STB SoCs.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called iproc-rng200

drivers/char/hw_random/core.c  (+33, -20)

@@ -292,26 +292,48 @@ static struct miscdevice rng_miscdev = {
 	.groups		= rng_dev_groups,
 };
 
+static int enable_best_rng(void)
+{
+	int ret = -ENODEV;
+
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+
+	/* rng_list is sorted by quality, use the best (=first) one */
+	if (!list_empty(&rng_list)) {
+		struct hwrng *new_rng;
+
+		new_rng = list_entry(rng_list.next, struct hwrng, list);
+		ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+		if (!ret)
+			cur_rng_set_by_user = 0;
+	}
+
+	return ret;
+}
+
 static ssize_t hwrng_attr_current_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t len)
 {
-	int err;
+	int err = -ENODEV;
 	struct hwrng *rng;
 
 	err = mutex_lock_interruptible(&rng_mutex);
 	if (err)
 		return -ERESTARTSYS;
-	err = -ENODEV;
-	list_for_each_entry(rng, &rng_list, list) {
-		if (sysfs_streq(rng->name, buf)) {
-			err = 0;
-			cur_rng_set_by_user = 1;
-			if (rng != current_rng)
+
+	if (sysfs_streq(buf, "")) {
+		err = enable_best_rng();
+	} else {
+		list_for_each_entry(rng, &rng_list, list) {
+			if (sysfs_streq(rng->name, buf)) {
+				cur_rng_set_by_user = 1;
 				err = set_current_rng(rng);
-			break;
+				break;
+			}
 		}
 	}
+
 	mutex_unlock(&rng_mutex);
 
 	return err ? : len;
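hwrng_attr_current_store() now treats an empty write as "drop the user override": enable_best_rng() re-selects the head of the quality-sorted list and clears cur_rng_set_by_user, so writing an empty string to rng_current (typically /sys/class/misc/hw_random/rng_current) reverts to automatic selection. The return statement uses the GNU conditional with an omitted middle operand:

	return err ? : len;	/* equivalent to: err ? err : len */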
@@ -423,7 +445,7 @@ static void start_khwrngd(void)
 {
 	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
 	if (IS_ERR(hwrng_fill)) {
-		pr_err("hwrng_fill thread creation failed");
+		pr_err("hwrng_fill thread creation failed\n");
 		hwrng_fill = NULL;
 	}
 }
@@ -493,17 +515,8 @@ void hwrng_unregister(struct hwrng *rng)
 	mutex_lock(&rng_mutex);
 
 	list_del(&rng->list);
-	if (current_rng == rng) {
-		drop_current_rng();
-		cur_rng_set_by_user = 0;
-		/* rng_list is sorted by quality, use the best (=first) one */
-		if (!list_empty(&rng_list)) {
-			struct hwrng *new_rng;
-
-			new_rng = list_entry(rng_list.next, struct hwrng, list);
-			set_current_rng(new_rng);
-		}
-	}
+	if (current_rng == rng)
+		enable_best_rng();
 
 	if (list_empty(&rng_list)) {
 		mutex_unlock(&rng_mutex);

drivers/char/hw_random/iproc-rng200.c  (+1, -0)

@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id iproc_rng200_of_match[] = {
+	{ .compatible = "brcm,bcm7278-rng200", },
 	{ .compatible = "brcm,iproc-rng200", },
 	{},
 };

drivers/char/hw_random/pseries-rng.c  (+1, -1)

@@ -72,7 +72,7 @@ static int pseries_rng_remove(struct vio_dev *dev)
 	return 0;
 }
 
-static struct vio_device_id pseries_rng_driver_ids[] = {
+static const struct vio_device_id pseries_rng_driver_ids[] = {
 	{ "ibm,random-v1", "ibm,random"},
 	{ "", "" }
 };

drivers/char/hw_random/timeriomem-rng.c  (+0, -7)

@@ -52,13 +52,6 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
 	int retval = 0;
 	int period_us = ktime_to_us(priv->period);
 
-	/*
-	 * The RNG provides 32-bits per read.  Ensure there is enough space for
-	 * at minimum one read.
-	 */
-	if (max < sizeof(u32))
-		return 0;
-
 	/*
 	 * There may not have been enough time for new data to be generated
 	 * since the last request.  If the caller doesn't want to wait, let them

drivers/char/hw_random/virtio-rng.c  (+20, -1)

@@ -184,7 +184,26 @@ static int virtrng_freeze(struct virtio_device *vdev)
 
 static int virtrng_restore(struct virtio_device *vdev)
 {
-	return probe_common(vdev);
+	int err;
+
+	err = probe_common(vdev);
+	if (!err) {
+		struct virtrng_info *vi = vdev->priv;
+
+		/*
+		 * Set hwrng_removed to ensure that virtio_read()
+		 * does not block waiting for data before the
+		 * registration is complete.
+		 */
+		vi->hwrng_removed = true;
+		err = hwrng_register(&vi->hwrng);
+		if (!err) {
+			vi->hwrng_register_done = true;
+			vi->hwrng_removed = false;
+		}
+	}
+
+	return err;
 }
 #endif
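Re-registration is needed because the freeze path tears the device state down, so a resumed device has no hwrng registered. hwrng_removed is held true until hwrng_register() returns, so a reader racing with resume does not block on a device that is not serving requests yet; the driver's read callback is assumed to bail out while the flag is set, roughly:

	/* assumed shape of the guard in this driver's read path */
	if (vi->hwrng_removed)
		return 0;	/* report "no data" rather than sleeping */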
 

drivers/crypto/Kconfig  (+21, -19)

@@ -199,22 +199,8 @@ config CRYPTO_CRC32_S390
 
 	  It is available with IBM z13 or later.
 
-config CRYPTO_DEV_MV_CESA
-	tristate "Marvell's Cryptographic Engine"
-	depends on PLAT_ORION
-	select CRYPTO_AES
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_HASH
-	select SRAM
-	help
-	  This driver allows you to utilize the Cryptographic Engines and
-	  Security Accelerator (CESA) which can be found on the Marvell Orion
-	  and Kirkwood SoCs, such as QNAP's TS-209.
-
-	  Currently the driver supports AES in ECB and CBC mode without DMA.
-
 config CRYPTO_DEV_MARVELL_CESA
-	tristate "New Marvell's Cryptographic Engine driver"
+	tristate "Marvell's Cryptographic Engine driver"
 	depends on PLAT_ORION || ARCH_MVEBU
 	select CRYPTO_AES
 	select CRYPTO_DES
@@ -223,12 +209,10 @@ config CRYPTO_DEV_MARVELL_CESA
 	select SRAM
 	help
 	  This driver allows you to utilize the Cryptographic Engines and
-	  Security Accelerator (CESA) which can be found on the Armada 370.
+	  Security Accelerator (CESA) which can be found on MVEBU and ORION
+	  platforms.
 	  This driver supports CPU offload through DMA transfers.
 
-	  This driver is aimed at replacing the mv_cesa driver. This will only
-	  happen once it has received proper testing.
-
 config CRYPTO_DEV_NIAGARA2
        tristate "Niagara2 Stream Processing Unit driver"
        select CRYPTO_DES
@@ -315,6 +299,10 @@ config CRYPTO_DEV_PPC4XX
 	tristate "Driver AMCC PPC4xx crypto accelerator"
 	depends on PPC && 4xx
 	select CRYPTO_HASH
+	select CRYPTO_AEAD
+	select CRYPTO_AES
+	select CRYPTO_CCM
+	select CRYPTO_GCM
 	select CRYPTO_BLKCIPHER
 	help
 	  This option allows you to have support for AMCC crypto acceleration.
@@ -439,6 +427,20 @@ config CRYPTO_DEV_S5P
 	  Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
 	  algorithms execution.
 
+config CRYPTO_DEV_EXYNOS_HASH
+	bool "Support for Samsung Exynos HASH accelerator"
+	depends on CRYPTO_DEV_S5P
+	depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m
+	select CRYPTO_SHA1
+	select CRYPTO_MD5
+	select CRYPTO_SHA256
+	help
+	  Select this to offload Exynos from HASH MD5/SHA1/SHA256.
+	  This will select software SHA1, MD5 and SHA256 as they are
+	  needed for small and zero-size messages.
+	  HASH algorithms will be disabled if EXYNOS_RNG
+	  is enabled due to hw conflict.
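The doubled condition on CRYPTO_DEV_EXYNOS_RNG is deliberate: in Kconfig tristate logic, !FOO evaluates to m (still visible to a bool symbol) when FOO=m, so the explicit !=m test is needed to exclude the modular case too. The PRNG and hash engines live in the same SSS block and cannot be driven at once. In truth-table form:

	# EXYNOS_RNG = n  ->  !RNG = y, RNG != m  ->  EXYNOS_HASH selectable
	# EXYNOS_RNG = y  ->  !RNG = n            ->  EXYNOS_HASH hidden
	# EXYNOS_RNG = m  ->  !RNG = m, RNG = m   ->  EXYNOS_HASH hidden
	depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m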
+
 config CRYPTO_DEV_NX
 	bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
 	depends on PPC64

drivers/crypto/Makefile  (+0, -1)

@@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
 obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o

drivers/crypto/amcc/Makefile  (+1, -1)

@@ -1,3 +1,3 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-y :=  crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y :=  crypto4xx_core.o crypto4xx_alg.o
 crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o

drivers/crypto/amcc/crypto4xx_alg.c  (+438, -74)

@@ -26,11 +26,14 @@
 #include <crypto/internal/hash.h>
 #include <linux/dma-mapping.h>
 #include <crypto/algapi.h>
+#include <crypto/aead.h>
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include <crypto/sha.h>
+#include <crypto/ctr.h>
 #include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
 #include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
 
 static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
 				     u32 save_iv, u32 ld_h, u32 ld_iv,
@@ -62,6 +65,7 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
 	sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
 	sa->sa_command_1.bf.feedback_mode = cfb,
 	sa->sa_command_1.bf.sa_rev = 1;
+	sa->sa_command_1.bf.hmac_muting = hmac_mc;
 	sa->sa_command_1.bf.extended_seq_num = esn;
 	sa->sa_command_1.bf.seq_num_mask = sn_mask;
 	sa->sa_command_1.bf.mutable_bit_proc = mute;
@@ -73,29 +77,29 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
 int crypto4xx_encrypt(struct ablkcipher_request *req)
 {
 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	unsigned int ivlen = crypto_ablkcipher_ivsize(
+		crypto_ablkcipher_reqtfm(req));
+	__le32 iv[ivlen];
 
-	ctx->direction = DIR_OUTBOUND;
-	ctx->hash_final = 0;
-	ctx->is_hash = 0;
-	ctx->pd_ctl = 0x1;
+	if (ivlen)
+		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
 
 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
-				  req->nbytes, req->info,
-				  get_dynamic_sa_iv_size(ctx));
+		req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
 }
 
 int crypto4xx_decrypt(struct ablkcipher_request *req)
 {
 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	unsigned int ivlen = crypto_ablkcipher_ivsize(
+		crypto_ablkcipher_reqtfm(req));
+	__le32 iv[ivlen];
 
-	ctx->direction = DIR_INBOUND;
-	ctx->hash_final = 0;
-	ctx->is_hash = 0;
-	ctx->pd_ctl = 1;
+	if (ivlen)
+		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
 
 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
-				  req->nbytes, req->info,
-				  get_dynamic_sa_iv_size(ctx));
+		req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
 }
 
 /**
@@ -120,23 +124,15 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
 	}
 
 	/* Create SA */
-	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+	if (ctx->sa_in || ctx->sa_out)
 		crypto4xx_free_sa(ctx);
 
 	rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
 	if (rc)
 		return rc;
 
-	if (ctx->state_record_dma_addr == 0) {
-		rc = crypto4xx_alloc_state_record(ctx);
-		if (rc) {
-			crypto4xx_free_sa(ctx);
-			return rc;
-		}
-	}
 	/* Setup SA */
-	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-	ctx->hash_final = 0;
+	sa = ctx->sa_in;
 
 	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
 				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
@@ -150,18 +146,13 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
 				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
 				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
 				 SA_NOT_COPY_HDR);
-	crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
-			    key, keylen);
-	sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+				 key, keylen);
+	sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
 	sa->sa_command_1.bf.key_len = keylen >> 3;
-	ctx->is_hash = 0;
-	ctx->direction = DIR_INBOUND;
-	memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
-			(void *)&ctx->state_record_dma_addr, 4);
-	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
 
 	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
-	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+	sa = ctx->sa_out;
 	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
 
 	return 0;
@@ -174,6 +165,396 @@ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
 				    CRYPTO_FEEDBACK_MODE_NO_FB);
 }
 
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+				    CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+				    CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+				    CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+	int rc;
+
+	rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
+		CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+	if (rc)
+		return rc;
+
+	ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
+						 CTR_RFC3686_NONCE_SIZE]);
+
+	return 0;
+}
+
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	__le32 iv[AES_IV_SIZE / 4] = {
+		ctx->iv_nonce,
+		cpu_to_le32p((u32 *) req->info),
+		cpu_to_le32p((u32 *) (req->info + 4)),
+		cpu_to_le32(1) };
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->nbytes, iv, AES_IV_SIZE,
+				  ctx->sa_out, ctx->sa_len, 0);
+}
+
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	__le32 iv[AES_IV_SIZE / 4] = {
+		ctx->iv_nonce,
+		cpu_to_le32p((u32 *) req->info),
+		cpu_to_le32p((u32 *) (req->info + 4)),
+		cpu_to_le32(1) };
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->nbytes, iv, AES_IV_SIZE,
+				  ctx->sa_out, ctx->sa_len, 0);
+}
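The rfc3686 pair spells out the CTR counter-block construction: a 4-byte nonce split off the tail of the key at setkey time, the 8-byte per-request IV, and a 32-bit block counter starting at 1. Decryption programs ctx->sa_out as well, because CTR mode is its own inverse: both directions XOR the same keystream. The block handed to the engine:

	/*
	 * __le32 iv[4]:
	 *   iv[0] = nonce           (last CTR_RFC3686_NONCE_SIZE key bytes)
	 *   iv[1] = req->info[0..3] (explicit IV, first word)
	 *   iv[2] = req->info[4..7] (explicit IV, second word)
	 *   iv[3] = 1               (initial block counter)
	 */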
+
+static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+						bool is_ccm, bool decrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	/* authsize has to be a multiple of 4 */
+	if (aead->authsize & 3)
+		return true;
+
+	/*
+	 * hardware does not handle cases where cryptlen
+	 * is less than a block
+	 */
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return true;
+
+	/* assoc len needs to be a multiple of 4 */
+	if (req->assoclen & 0x3)
+		return true;
+
+	/* CCM supports only counter field length of 2 and 4 bytes */
+	if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+		return true;
+
+	/* CCM - fix CBC MAC mismatch in special case */
+	if (is_ccm && decrypt && !req->assoclen)
+		return true;
+
+	return false;
+}
+
+static int crypto4xx_aead_fallback(struct aead_request *req,
+	struct crypto4xx_ctx *ctx, bool do_decrypt)
+{
+	char aead_req_data[sizeof(struct aead_request) +
+			   crypto_aead_reqsize(ctx->sw_cipher.aead)]
+		__aligned(__alignof__(struct aead_request));
+
+	struct aead_request *subreq = (void *) aead_req_data;
+
+	memset(subreq, 0, sizeof(aead_req_data));
+
+	aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+			       req->iv);
+	aead_request_set_ad(subreq, req->assoclen);
+	return do_decrypt ? crypto_aead_decrypt(subreq) :
+			    crypto_aead_encrypt(subreq);
+}
+
+static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
+				    struct crypto_aead *cipher,
+				    const u8 *key,
+				    unsigned int keylen)
+{
+	int rc;
+
+	crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(ctx->sw_cipher.aead,
+		crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+	rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
+	crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(cipher,
+		crypto_aead_get_flags(ctx->sw_cipher.aead) &
+			CRYPTO_TFM_RES_MASK);
+
+	return rc;
+}
+
+/**
+ * AES-CCM Functions
+ */
+
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+			     unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct dynamic_sa_ctl *sa;
+	int rc = 0;
+
+	rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+	if (rc)
+		return rc;
+
+	if (ctx->sa_in || ctx->sa_out)
+		crypto4xx_free_sa(ctx);
+
+	rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
+	if (rc)
+		return rc;
+
+	/* Setup SA */
+	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+	sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+				 SA_CIPHER_ALG_AES,
+				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+				 SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+				 SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+				 SA_NOT_COPY_HDR);
+
+	sa->sa_command_1.bf.key_len = keylen >> 3;
+
+	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
+
+	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+				 SA_CIPHER_ALG_AES,
+				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+				 SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+
+	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+				 SA_COPY_PAD, SA_COPY_PAYLOAD,
+				 SA_NOT_COPY_HDR);
+
+	sa->sa_command_1.bf.key_len = keylen >> 3;
+	return 0;
+}
+
+static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
+{
+	struct crypto4xx_ctx *ctx  = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	unsigned int len = req->cryptlen;
+	__le32 iv[16];
+	u32 tmp_sa[ctx->sa_len * 4];
+	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
+
+	if (crypto4xx_aead_need_fallback(req, true, decrypt))
+		return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+	if (decrypt)
+		len -= crypto_aead_authsize(aead);
+
+	memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
+	sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
+
+	if (req->iv[0] == 1) {
+		/* CRYPTO_MODE_AES_ICM */
+		sa->sa_command_1.bf.crypto_mode9_8 = 1;
+	}
+
+	iv[3] = cpu_to_le32(0);
+	crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  len, iv, sizeof(iv),
+				  sa, ctx->sa_len, req->assoclen);
+}
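For CCM, req->iv[0] doubles as the counter block's flags octet and encodes the counter-field width as L - 1, which is why the fallback test above accepts only the values 1 and 3 (2- and 4-byte counters). Copying 16 - (req->iv[0] + 1) bytes therefore transfers the flags octet plus the nonce and leaves the counter octets zeroed. The SA is also duplicated onto the stack so digest_len can be patched per request without disturbing other users of the same tfm. Layout, with L = iv[0] + 1:

	/*
	 * A0 counter block (16 bytes):
	 *   byte  0          : flags = L - 1
	 *   bytes 1 .. 15-L  : nonce
	 *   bytes 16-L .. 15 : counter, initialised to 0
	 */
	crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));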
+
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+	return crypto4xx_crypt_aes_ccm(req, false);
+}
+
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+	return crypto4xx_crypt_aes_ccm(req, true);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+			       unsigned int authsize)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
+}
+
+/**
+ * AES-GCM Functions
+ */
+
+static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+	switch (keylen) {
+	case 16:
+	case 24:
+	case 32:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
+					     unsigned int keylen)
+{
+	struct crypto_cipher *aes_tfm = NULL;
+	uint8_t src[16] = { 0 };
+	int rc = 0;
+
+	aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
+				      CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(aes_tfm)) {
+		rc = PTR_ERR(aes_tfm);
+		pr_warn("could not load aes cipher driver: %d\n", rc);
+		return rc;
+	}
+
+	rc = crypto_cipher_setkey(aes_tfm, key, keylen);
+	if (rc) {
+		pr_err("setkey() failed: %d\n", rc);
+		goto out;
+	}
+
+	crypto_cipher_encrypt_one(aes_tfm, src, src);
+	crypto4xx_memcpy_to_le32(hash_start, src, 16);
+out:
+	crypto_free_cipher(aes_tfm);
+	return rc;
+}
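The GHASH hash subkey is H = E_K(0^128), the block cipher applied to an all-zero block. The engine wants H preloaded into the SA's inner digest, so it is computed once at setkey time with a throwaway software AES cipher:

	/* src[] starts all-zero; after this call it holds H = E_K(0^128) */
	crypto_cipher_encrypt_one(aes_tfm, src, src);
	crypto4xx_memcpy_to_le32(hash_start, src, 16);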
+
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct dynamic_sa_ctl *sa;
+	int    rc = 0;
+
+	if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
+		crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+	if (rc)
+		return rc;
+
+	if (ctx->sa_in || ctx->sa_out)
+		crypto4xx_free_sa(ctx);
+
+	rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
+	if (rc)
+		return rc;
+
+	sa  = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+	sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
+	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+				 SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+				 SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+				 DIR_INBOUND);
+	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+				 SA_SEQ_MASK_ON, SA_MC_DISABLE,
+				 SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+				 SA_NOT_COPY_HDR);
+
+	sa->sa_command_1.bf.key_len = keylen >> 3;
+
+	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+				 key, keylen);
+
+	rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
+		key, keylen);
+	if (rc) {
+		pr_err("GCM hash key setting failed = %d\n", rc);
+		goto err;
+	}
+
+	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+	sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+	return 0;
+err:
+	crypto4xx_free_sa(ctx);
+	return rc;
+}
+
+static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
+					  bool decrypt)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	unsigned int len = req->cryptlen;
+	__le32 iv[4];
+
+	if (crypto4xx_aead_need_fallback(req, false, decrypt))
+		return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+	crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
+	iv[3] = cpu_to_le32(1);
+
+	if (decrypt)
+		len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  len, iv, sizeof(iv),
+				  decrypt ? ctx->sa_in : ctx->sa_out,
+				  ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+	return crypto4xx_crypt_aes_gcm(req, false);
+}
+
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+	return crypto4xx_crypt_aes_gcm(req, true);
+}
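For the 96-bit IVs this driver accepts, GCM's initial counter block is J0 = IV || 0x00000001, which is exactly what the iv[] setup builds (iv[3] = 1); the engine increments the counter from there. As with CCM, decrypt trims the tag off the length, since req->cryptlen covers ciphertext plus tag on input:

	crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);	/* 12 bytes */
	iv[3] = cpu_to_le32(1);			/* J0 = IV || 0x00000001 */

	if (decrypt)				/* input carries the tag too */
		len -= crypto_aead_authsize(crypto_aead_reqtfm(req));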
+
 /**
  * HASH SHA1 Functions
  */
@@ -183,53 +564,39 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
 				   unsigned char hm)
 {
 	struct crypto_alg *alg = tfm->__crt_alg;
-	struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+	struct crypto4xx_alg *my_alg;
 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct dynamic_sa_ctl *sa;
-	struct dynamic_sa_hash160 *sa_in;
+	struct dynamic_sa_hash160 *sa;
 	int rc;
 
+	my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
+			      alg.u.hash);
 	ctx->dev   = my_alg->dev;
-	ctx->is_hash = 1;
-	ctx->hash_final = 0;
 
 	/* Create SA */
-	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+	if (ctx->sa_in || ctx->sa_out)
 		crypto4xx_free_sa(ctx);
 
 	rc = crypto4xx_alloc_sa(ctx, sa_len);
 	if (rc)
 		return rc;
 
-	if (ctx->state_record_dma_addr == 0) {
-		crypto4xx_alloc_state_record(ctx);
-		if (!ctx->state_record_dma_addr) {
-			crypto4xx_free_sa(ctx);
-			return -ENOMEM;
-		}
-	}
-
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct crypto4xx_ctx));
-	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+	sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
+	set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
 				 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
 				 SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
 				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
 				 SA_OPCODE_HASH, DIR_INBOUND);
-	set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+	set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
 				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
 				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
 				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
 				 SA_NOT_COPY_HDR);
-	ctx->direction = DIR_INBOUND;
-	sa->sa_contents = SA_HASH160_CONTENTS;
-	sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
 	/* Need to zero hash digest in SA */
-	memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
-	memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
-	sa_in->state_ptr = ctx->state_record_dma_addr;
-	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+	memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
+	memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
 
 	return 0;
 }
@@ -240,29 +607,27 @@ int crypto4xx_hash_init(struct ahash_request *req)
 	int ds;
 	struct dynamic_sa_ctl *sa;
 
-	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+	sa = ctx->sa_in;
 	ds = crypto_ahash_digestsize(
 			__crypto_ahash_cast(req->base.tfm));
 	sa->sa_command_0.bf.digest_len = ds >> 2;
 	sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
-	ctx->is_hash = 1;
-	ctx->direction = DIR_INBOUND;
 
 	return 0;
 }
 
 int crypto4xx_hash_update(struct ahash_request *req)
 {
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct scatterlist dst;
+	unsigned int ds = crypto_ahash_digestsize(ahash);
 
-	ctx->is_hash = 1;
-	ctx->hash_final = 0;
-	ctx->pd_ctl = 0x11;
-	ctx->direction = DIR_INBOUND;
+	sg_init_one(&dst, req->result, ds);
 
-	return crypto4xx_build_pd(&req->base, ctx, req->src,
-				  (struct scatterlist *) req->result,
-				  req->nbytes, NULL, 0);
+	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+				  req->nbytes, NULL, 0, ctx->sa_in,
+				  ctx->sa_len, 0);
 }
 
 int crypto4xx_hash_final(struct ahash_request *req)
@@ -272,15 +637,16 @@ int crypto4xx_hash_final(struct ahash_request *req)
 
 int crypto4xx_hash_digest(struct ahash_request *req)
 {
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct scatterlist dst;
+	unsigned int ds = crypto_ahash_digestsize(ahash);
 
-	ctx->hash_final = 1;
-	ctx->pd_ctl = 0x11;
-	ctx->direction = DIR_INBOUND;
+	sg_init_one(&dst, req->result, ds);
 
-	return crypto4xx_build_pd(&req->base, ctx, req->src,
-				  (struct scatterlist *) req->result,
-				  req->nbytes, NULL, 0);
+	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+				  req->nbytes, NULL, 0, ctx->sa_in,
+				  ctx->sa_len, 0);
 }
 
 /**
@@ -291,5 +657,3 @@ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
 	return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
 				       SA_HASH_MODE_HASH);
 }
-
-

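The hash entry points above are reached the same way, through the generic ahash API. A minimal synchronous sketch follows; the helper name is hypothetical, and whichever "sha1" implementation carries the highest priority wins the allocation:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* out must provide SA_HASH_ALG_SHA1_DIGEST_SIZE (20) bytes */
static int example_sha1_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}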
+ 476 - 355
drivers/crypto/amcc/crypto4xx_core.c

@@ -35,8 +35,14 @@
 #include <asm/dcr.h>
 #include <asm/dcr-regs.h>
 #include <asm/cacheflush.h>
+#include <crypto/aead.h>
 #include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/gcm.h>
 #include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
 #include "crypto4xx_reg_def.h"
 #include "crypto4xx_core.h"
 #include "crypto4xx_sa.h"
@@ -127,21 +133,17 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
 
 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
 {
-	ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
-					&ctx->sa_in_dma_addr, GFP_ATOMIC);
+	ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
 	if (ctx->sa_in == NULL)
 		return -ENOMEM;
 
-	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
-					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
+	ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
 	if (ctx->sa_out == NULL) {
-		dma_free_coherent(ctx->dev->core_dev->device, size * 4,
-				  ctx->sa_in, ctx->sa_in_dma_addr);
+		kfree(ctx->sa_in);
+		ctx->sa_in = NULL;
 		return -ENOMEM;
 	}
 
-	memset(ctx->sa_in, 0, size * 4);
-	memset(ctx->sa_out, 0, size * 4);
 	ctx->sa_len = size;
 
 	return 0;
@@ -149,40 +151,13 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
 
 void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
 {
-	if (ctx->sa_in != NULL)
-		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
-				  ctx->sa_in, ctx->sa_in_dma_addr);
-	if (ctx->sa_out != NULL)
-		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
-				  ctx->sa_out, ctx->sa_out_dma_addr);
-
-	ctx->sa_in_dma_addr = 0;
-	ctx->sa_out_dma_addr = 0;
+	kfree(ctx->sa_in);
+	ctx->sa_in = NULL;
+	kfree(ctx->sa_out);
+	ctx->sa_out = NULL;
 	ctx->sa_len = 0;
 }
 
-u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
-{
-	ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
-				sizeof(struct sa_state_record),
-				&ctx->state_record_dma_addr, GFP_ATOMIC);
-	if (!ctx->state_record_dma_addr)
-		return -ENOMEM;
-	memset(ctx->state_record, 0, sizeof(struct sa_state_record));
-
-	return 0;
-}
-
-void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
-{
-	if (ctx->state_record != NULL)
-		dma_free_coherent(ctx->dev->core_dev->device,
-				  sizeof(struct sa_state_record),
-				  ctx->state_record,
-				  ctx->state_record_dma_addr);
-	ctx->state_record_dma_addr = 0;
-}
-
 /**
  * alloc memory for the gather ring
  * no need to alloc buf for the ring
@@ -191,7 +166,6 @@ void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
 static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 {
 	int i;
-	struct pd_uinfo *pd_uinfo;
 	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
 				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
 				      &dev->pdr_pa, GFP_ATOMIC);
@@ -207,9 +181,9 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 				  dev->pdr_pa);
 		return -ENOMEM;
 	}
-	memset(dev->pdr, 0,  sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
 	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
-				   256 * PPC4XX_NUM_PD,
+				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
 				   &dev->shadow_sa_pool_pa,
 				   GFP_ATOMIC);
 	if (!dev->shadow_sa_pool)
@@ -221,16 +195,17 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 	if (!dev->shadow_sr_pool)
 		return -ENOMEM;
 	for (i = 0; i < PPC4XX_NUM_PD; i++) {
-		pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
-						sizeof(struct pd_uinfo) * i);
+		struct ce_pd *pd = &dev->pdr[i];
+		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
+
+		pd->sa = dev->shadow_sa_pool_pa +
+			sizeof(union shadow_sa_buf) * i;
 
 		/* alloc 256 bytes which is enough for any kind of dynamic sa */
-		pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
-		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
+		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
 
 		/* alloc state record */
-		pd_uinfo->sr_va = dev->shadow_sr_pool +
-		    sizeof(struct sa_state_record) * i;
+		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
 		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
 		    sizeof(struct sa_state_record) * i;
 	}
@@ -240,13 +215,16 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 
 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
 {
-	if (dev->pdr != NULL)
+	if (dev->pdr)
 		dma_free_coherent(dev->core_dev->device,
 				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
 				  dev->pdr, dev->pdr_pa);
+
 	if (dev->shadow_sa_pool)
-		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
-				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+		dma_free_coherent(dev->core_dev->device,
+			sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+			dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
 	if (dev->shadow_sr_pool)
 		dma_free_coherent(dev->core_dev->device,
 			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -273,28 +251,21 @@ static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
 
 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
 {
-	struct pd_uinfo *pd_uinfo;
+	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+	u32 tail;
 	unsigned long flags;
 
-	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
-				       sizeof(struct pd_uinfo) * idx);
 	spin_lock_irqsave(&dev->core_dev->lock, flags);
+	pd_uinfo->state = PD_ENTRY_FREE;
+
 	if (dev->pdr_tail != PPC4XX_LAST_PD)
 		dev->pdr_tail++;
 	else
 		dev->pdr_tail = 0;
-	pd_uinfo->state = PD_ENTRY_FREE;
+	tail = dev->pdr_tail;
 	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 
-	return 0;
-}
-
-static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
-				       dma_addr_t *pd_dma, u32 idx)
-{
-	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
-
-	return dev->pdr + sizeof(struct ce_pd) * idx;
+	return tail;
 }
 
 /**
@@ -326,10 +297,11 @@ static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
  * when this function is called.
  * preemption or interrupts must be disabled
  */
-u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
 {
 	u32 retval;
 	u32 tmp;
+
 	if (n >= PPC4XX_NUM_GD)
 		return ERING_WAS_FULL;
 
@@ -372,7 +344,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
 {
 	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
 
-	return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
+	return &dev->gdr[idx];
 }
 
 /**
@@ -383,7 +355,6 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
 {
 	int i;
-	struct ce_sd *sd_array;
 
 	/* alloc memory for scatter descriptor ring */
 	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
@@ -392,10 +363,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
 	if (!dev->sdr)
 		return -ENOMEM;
 
-	dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
 	dev->scatter_buffer_va =
 		dma_alloc_coherent(dev->core_dev->device,
-			dev->scatter_buffer_size * PPC4XX_NUM_SD,
+			PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
 			&dev->scatter_buffer_pa, GFP_ATOMIC);
 	if (!dev->scatter_buffer_va) {
 		dma_free_coherent(dev->core_dev->device,
@@ -404,11 +374,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
 		return -ENOMEM;
 	}
 
-	sd_array = dev->sdr;
-
 	for (i = 0; i < PPC4XX_NUM_SD; i++) {
-		sd_array[i].ptr = dev->scatter_buffer_pa +
-				  dev->scatter_buffer_size * i;
+		dev->sdr[i].ptr = dev->scatter_buffer_pa +
+				  PPC4XX_SD_BUFFER_SIZE * i;
 	}
 
 	return 0;
@@ -416,14 +384,14 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
 
 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
 {
-	if (dev->sdr != NULL)
+	if (dev->sdr)
 		dma_free_coherent(dev->core_dev->device,
 				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
 				  dev->sdr, dev->sdr_pa);
 
-	if (dev->scatter_buffer_va != NULL)
+	if (dev->scatter_buffer_va)
 		dma_free_coherent(dev->core_dev->device,
-				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
+				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
 				  dev->scatter_buffer_va,
 				  dev->scatter_buffer_pa);
 }
@@ -477,63 +445,7 @@ static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
 {
 	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
 
-	return  (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
-}
-
-static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
-				   dma_addr_t *addr, u32 *length,
-				   u32 *idx, u32 *offset, u32 *nbytes)
-{
-	u32 len;
-
-	if (*length > dev->scatter_buffer_size) {
-		memcpy(phys_to_virt(*addr),
-			dev->scatter_buffer_va +
-			*idx * dev->scatter_buffer_size + *offset,
-			dev->scatter_buffer_size);
-		*offset = 0;
-		*length -= dev->scatter_buffer_size;
-		*nbytes -= dev->scatter_buffer_size;
-		if (*idx == PPC4XX_LAST_SD)
-			*idx = 0;
-		else
-			(*idx)++;
-		*addr = *addr +  dev->scatter_buffer_size;
-		return 1;
-	} else if (*length < dev->scatter_buffer_size) {
-		memcpy(phys_to_virt(*addr),
-			dev->scatter_buffer_va +
-			*idx * dev->scatter_buffer_size + *offset, *length);
-		if ((*offset + *length) == dev->scatter_buffer_size) {
-			if (*idx == PPC4XX_LAST_SD)
-				*idx = 0;
-			else
-				(*idx)++;
-			*nbytes -= *length;
-			*offset = 0;
-		} else {
-			*nbytes -= *length;
-			*offset += *length;
-		}
-
-		return 0;
-	} else {
-		len = (*nbytes <= dev->scatter_buffer_size) ?
-				(*nbytes) : dev->scatter_buffer_size;
-		memcpy(phys_to_virt(*addr),
-			dev->scatter_buffer_va +
-			*idx * dev->scatter_buffer_size + *offset,
-			len);
-		*offset = 0;
-		*nbytes -= len;
-
-		if (*idx == PPC4XX_LAST_SD)
-			*idx = 0;
-		else
-			(*idx)++;
-
-		return 0;
-    }
+	return &dev->sdr[idx];
 }
 
 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
@@ -542,66 +454,52 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
 				      u32 nbytes,
 				      struct scatterlist *dst)
 {
-	dma_addr_t addr;
-	u32 this_sd;
-	u32 offset;
-	u32 len;
-	u32 i;
-	u32 sg_len;
-	struct scatterlist *sg;
+	unsigned int first_sd = pd_uinfo->first_sd;
+	unsigned int last_sd;
+	unsigned int overflow = 0;
+	unsigned int to_copy;
+	unsigned int dst_start = 0;
 
-	this_sd = pd_uinfo->first_sd;
-	offset = 0;
-	i = 0;
+	/*
+	 * Because the scatter buffers are all neatly organized in one
+	 * big continuous ringbuffer; scatterwalk_map_and_copy() can
+	 * be instructed to copy a range of buffers in one go.
+	 */
+
+	last_sd = (first_sd + pd_uinfo->num_sd);
+	if (last_sd > PPC4XX_LAST_SD) {
+		last_sd = PPC4XX_LAST_SD;
+		overflow = last_sd % PPC4XX_NUM_SD;
+	}
 
 	while (nbytes) {
-		sg = &dst[i];
-		sg_len = sg->length;
-		addr = dma_map_page(dev->core_dev->device, sg_page(sg),
-				sg->offset, sg->length, DMA_TO_DEVICE);
-
-		if (offset == 0) {
-			len = (nbytes <= sg->length) ? nbytes : sg->length;
-			while (crypto4xx_fill_one_page(dev, &addr, &len,
-				&this_sd, &offset, &nbytes))
-				;
-			if (!nbytes)
-				return;
-			i++;
-		} else {
-			len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
-				nbytes : (dev->scatter_buffer_size - offset);
-			len = (sg->length < len) ? sg->length : len;
-			while (crypto4xx_fill_one_page(dev, &addr, &len,
-					       &this_sd, &offset, &nbytes))
-				;
-			if (!nbytes)
-				return;
-			sg_len -= len;
-			if (sg_len) {
-				addr += len;
-				while (crypto4xx_fill_one_page(dev, &addr,
-					&sg_len, &this_sd, &offset, &nbytes))
-					;
-			}
-			i++;
+		void *buf = dev->scatter_buffer_va +
+			first_sd * PPC4XX_SD_BUFFER_SIZE;
+
+		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
+				      (1 + last_sd - first_sd));
+		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
+		nbytes -= to_copy;
+
+		if (overflow) {
+			first_sd = 0;
+			last_sd = overflow;
+			dst_start += to_copy;
+			overflow = 0;
 		}
 	}
 }
 
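The rewritten copy path above leans entirely on scatterwalk_map_and_copy(). Its semantics, summarized here for reference rather than taken from this patch:

	/*
	 * scatterwalk_map_and_copy(buf, sg, start, nbytes, out) copies
	 * nbytes between the linear buffer and the scatterlist, starting
	 * at byte offset 'start' within the scatterlist:
	 *
	 *   scatterwalk_map_and_copy(buf, sg, start, nbytes, 1); /* buf -> sg */
	 *   scatterwalk_map_and_copy(buf, sg, start, nbytes, 0); /* sg -> buf */
	 *
	 * which is why one call covers each contiguous run of scatter
	 * buffers, with a second call only for the ring wrap-around.
	 */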
-static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
+static void crypto4xx_copy_digest_to_dst(void *dst,
+					struct pd_uinfo *pd_uinfo,
 					struct crypto4xx_ctx *ctx)
 {
 	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-	struct sa_state_record *state_record =
-				(struct sa_state_record *) pd_uinfo->sr_va;
 
 	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
-		memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
+		memcpy(dst, pd_uinfo->sr_va->save_digest,
 		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
 	}
-
-	return 0;
 }
 
 static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
@@ -623,7 +521,7 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
 	}
 }
 
-static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
 				     struct pd_uinfo *pd_uinfo,
 				     struct ce_pd *pd)
 {
@@ -644,13 +542,13 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
 				    dst->offset, dst->length, DMA_FROM_DEVICE);
 	}
 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
-	if (ablk_req->base.complete != NULL)
-		ablk_req->base.complete(&ablk_req->base, 0);
 
-	return 0;
+	if (pd_uinfo->state & PD_ENTRY_BUSY)
+		ablkcipher_request_complete(ablk_req, -EINPROGRESS);
+	ablkcipher_request_complete(ablk_req, 0);
 }
 
-static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
+static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
 				struct pd_uinfo *pd_uinfo)
 {
 	struct crypto4xx_ctx *ctx;
@@ -659,62 +557,93 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
 	ahash_req = ahash_request_cast(pd_uinfo->async_req);
 	ctx  = crypto_tfm_ctx(ahash_req->base.tfm);
 
-	crypto4xx_copy_digest_to_dst(pd_uinfo,
+	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
 				     crypto_tfm_ctx(ahash_req->base.tfm));
 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
-	/* call user provided callback function x */
-	if (ahash_req->base.complete != NULL)
-		ahash_req->base.complete(&ahash_req->base, 0);
 
-	return 0;
+	if (pd_uinfo->state & PD_ENTRY_BUSY)
+		ahash_request_complete(ahash_req, -EINPROGRESS);
+	ahash_request_complete(ahash_req, 0);
 }
 
-static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+				struct pd_uinfo *pd_uinfo,
+				struct ce_pd *pd)
 {
-	struct ce_pd *pd;
-	struct pd_uinfo *pd_uinfo;
+	struct aead_request *aead_req;
+	struct crypto4xx_ctx *ctx;
+	struct scatterlist *dst = pd_uinfo->dest_va;
+	int err = 0;
 
-	pd =  dev->pdr + sizeof(struct ce_pd)*idx;
-	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
-	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
-			CRYPTO_ALG_TYPE_ABLKCIPHER)
-		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
-	else
-		return crypto4xx_ahash_done(dev, pd_uinfo);
+	aead_req = container_of(pd_uinfo->async_req, struct aead_request,
+				base);
+	ctx  = crypto_tfm_ctx(aead_req->base.tfm);
+
+	if (pd_uinfo->using_sd) {
+		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+					  pd->pd_ctl_len.bf.pkt_len,
+					  dst);
+	} else {
+		__dma_sync_page(sg_page(dst), dst->offset, dst->length,
+				DMA_FROM_DEVICE);
+	}
+
+	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+		/* append icv at the end */
+		size_t cp_len = crypto_aead_authsize(
+			crypto_aead_reqtfm(aead_req));
+		u32 icv[cp_len];
+
+		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+					   cp_len);
+
+		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+					 cp_len, 1);
+	}
+
+	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+	if (pd->pd_ctl.bf.status & 0xff) {
+		if (pd->pd_ctl.bf.status & 0x1) {
+			/* authentication error */
+			err = -EBADMSG;
+		} else {
+			if (!__ratelimit(&dev->aead_ratelimit)) {
+				if (pd->pd_ctl.bf.status & 2)
+					pr_err("pad fail error\n");
+				if (pd->pd_ctl.bf.status & 4)
+					pr_err("seqnum fail\n");
+				if (pd->pd_ctl.bf.status & 8)
+					pr_err("error _notify\n");
+				pr_err("aead return err status = 0x%02x\n",
+					pd->pd_ctl.bf.status & 0xff);
+				pr_err("pd pad_ctl = 0x%08x\n",
+					pd->pd_ctl.bf.pd_pad_ctl);
+			}
+			err = -EINVAL;
+		}
+	}
+
+	if (pd_uinfo->state & PD_ENTRY_BUSY)
+		aead_request_complete(aead_req, -EINPROGRESS);
+
+	aead_request_complete(aead_req, err);
 }
 
-/**
- * Note: Only use this function to copy items that is word aligned.
- */
-void crypto4xx_memcpy_le(unsigned int *dst,
-			 const unsigned char *buf,
-			 int len)
+static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
 {
-	u8 *tmp;
-	for (; len >= 4; buf += 4, len -= 4)
-		*dst++ = cpu_to_le32(*(unsigned int *) buf);
-
-	tmp = (u8 *)dst;
-	switch (len) {
-	case 3:
-		*tmp++ = 0;
-		*tmp++ = *(buf+2);
-		*tmp++ = *(buf+1);
-		*tmp++ = *buf;
-		break;
-	case 2:
-		*tmp++ = 0;
-		*tmp++ = 0;
-		*tmp++ = *(buf+1);
-		*tmp++ = *buf;
+	struct ce_pd *pd = &dev->pdr[idx];
+	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+
+	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
 		break;
-	case 1:
-		*tmp++ = 0;
-		*tmp++ = 0;
-		*tmp++ = 0;
-		*tmp++ = *buf;
+	case CRYPTO_ALG_TYPE_AEAD:
+		crypto4xx_aead_done(dev, pd_uinfo, pd);
 		break;
-	default:
+	case CRYPTO_ALG_TYPE_AHASH:
+		crypto4xx_ahash_done(dev, pd_uinfo);
 		break;
 	}
 }
@@ -729,17 +658,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
 	kfree(core_dev);
 }
 
-void crypto4xx_return_pd(struct crypto4xx_device *dev,
-			 u32 pd_entry, struct ce_pd *pd,
-			 struct pd_uinfo *pd_uinfo)
-{
-	/* irq should be already disabled */
-	dev->pdr_head = pd_entry;
-	pd->pd_ctl.w = 0;
-	pd->pd_ctl_len.w = 0;
-	pd_uinfo->state = PD_ENTRY_FREE;
-}
-
 static u32 get_next_gd(u32 current)
 {
 	if (current != PPC4XX_LAST_GD)
@@ -756,17 +674,19 @@ static u32 get_next_sd(u32 current)
 		return 0;
 }
 
-u32 crypto4xx_build_pd(struct crypto_async_request *req,
+int crypto4xx_build_pd(struct crypto_async_request *req,
 		       struct crypto4xx_ctx *ctx,
 		       struct scatterlist *src,
 		       struct scatterlist *dst,
-		       unsigned int datalen,
-		       void *iv, u32 iv_len)
+		       const unsigned int datalen,
+		       const __le32 *iv, const u32 iv_len,
+		       const struct dynamic_sa_ctl *req_sa,
+		       const unsigned int sa_len,
+		       const unsigned int assoclen)
 {
+	struct scatterlist _dst[2];
 	struct crypto4xx_device *dev = ctx->dev;
-	dma_addr_t addr, pd_dma, sd_dma, gd_dma;
 	struct dynamic_sa_ctl *sa;
-	struct scatterlist *sg;
 	struct ce_gd *gd;
 	struct ce_pd *pd;
 	u32 num_gd, num_sd;
@@ -774,22 +694,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 	u32 fst_sd = 0xffffffff;
 	u32 pd_entry;
 	unsigned long flags;
-	struct pd_uinfo *pd_uinfo = NULL;
-	unsigned int nbytes = datalen, idx;
-	unsigned int ivlen = 0;
+	struct pd_uinfo *pd_uinfo;
+	unsigned int nbytes = datalen;
+	size_t offset_to_sr_ptr;
 	u32 gd_idx = 0;
+	int tmp;
+	bool is_busy;
 
-	/* figure how many gd is needed */
-	num_gd = sg_nents_for_len(src, datalen);
-	if ((int)num_gd < 0) {
+	/* figure how many gd are needed */
+	tmp = sg_nents_for_len(src, assoclen + datalen);
+	if (tmp < 0) {
 		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
-		return -EINVAL;
+		return tmp;
 	}
-	if (num_gd == 1)
-		num_gd = 0;
+	if (tmp == 1)
+		tmp = 0;
+	num_gd = tmp;
 
-	/* figure how many sd is needed */
-	if (sg_is_last(dst) || ctx->is_hash) {
+	if (assoclen) {
+		nbytes += assoclen;
+		dst = scatterwalk_ffwd(_dst, dst, assoclen);
+	}
+
+	/* figure how many sd are needed */
+	if (sg_is_last(dst)) {
 		num_sd = 0;
 	} else {
 		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -808,6 +736,31 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 	 * already got must be returned to their original place.
 	 */
 	spin_lock_irqsave(&dev->core_dev->lock, flags);
+	/*
+	 * Let the caller know to slow down once more than 13/16ths (81%)
+	 * of the available data contexts are being used simultaneously.
+	 *
+	 * With PPC4XX_NUM_PD = 256, this leaves a "backlog queue" for up
+	 * to 31 more contexts before new requests have to be rejected.
+	 */
+	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+			((PPC4XX_NUM_PD * 13) / 16);
+	} else {
+		/*
+		 * To fix contention issues between ipsec (no backlog) and
+		 * dm-crypt (backlog), reserve 32 entries for "no backlog"
+		 * data contexts.
+		 */
+		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+			((PPC4XX_NUM_PD * 15) / 16);
+
+		if (is_busy) {
+			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+			return -EBUSY;
+		}
+	}
+
 	if (num_gd) {
 		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
 		if (fst_gd == ERING_WAS_FULL) {
@@ -835,38 +788,28 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 	}
 	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
 
-	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
-				       sizeof(struct pd_uinfo) * pd_entry);
-	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
+	pd = &dev->pdr[pd_entry];
+	pd->sa_len = sa_len;
+
+	pd_uinfo = &dev->pdr_uinfo[pd_entry];
 	pd_uinfo->async_req = req;
 	pd_uinfo->num_gd = num_gd;
 	pd_uinfo->num_sd = num_sd;
 
-	if (iv_len || ctx->is_hash) {
-		ivlen = iv_len;
-		pd->sa = pd_uinfo->sa_pa;
-		sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
-		if (ctx->direction == DIR_INBOUND)
-			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
-		else
-			memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
+	if (iv_len)
+		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
 
-		memcpy((void *) sa + ctx->offset_to_sr_ptr,
-			&pd_uinfo->sr_pa, 4);
+	sa = pd_uinfo->sa_va;
+	memcpy(sa, req_sa, sa_len * 4);
+
+	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
+	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
 
-		if (iv_len)
-			crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
-	} else {
-		if (ctx->direction == DIR_INBOUND) {
-			pd->sa = ctx->sa_in_dma_addr;
-			sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-		} else {
-			pd->sa = ctx->sa_out_dma_addr;
-			sa = (struct dynamic_sa_ctl *) ctx->sa_out;
-		}
-	}
-	pd->sa_len = ctx->sa_len;
 	if (num_gd) {
+		dma_addr_t gd_dma;
+		struct scatterlist *sg;
+
 		/* get first gd we are going to use */
 		gd_idx = fst_gd;
 		pd_uinfo->first_gd = fst_gd;
@@ -875,27 +818,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 		pd->src = gd_dma;
 		/* enable gather */
 		sa->sa_command_0.bf.gather = 1;
-		idx = 0;
-		src = &src[0];
 		/* walk the sg, and setup gather array */
+
+		sg = src;
 		while (nbytes) {
-			sg = &src[idx];
-			addr = dma_map_page(dev->core_dev->device, sg_page(sg),
-				    sg->offset, sg->length, DMA_TO_DEVICE);
-			gd->ptr = addr;
-			gd->ctl_len.len = sg->length;
+			size_t len;
+
+			len = min(sg->length, nbytes);
+			gd->ptr = dma_map_page(dev->core_dev->device,
+				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
+			gd->ctl_len.len = len;
 			gd->ctl_len.done = 0;
 			gd->ctl_len.ready = 1;
-			if (sg->length >= nbytes)
+			if (len >= nbytes)
 				break;
+
 			nbytes -= sg->length;
 			gd_idx = get_next_gd(gd_idx);
 			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
-			idx++;
+			sg = sg_next(sg);
 		}
 	} else {
 		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
-				src->offset, src->length, DMA_TO_DEVICE);
+				src->offset, min(nbytes, src->length),
+				DMA_TO_DEVICE);
 		/*
 		 * Disable gather in sa command
 		 */
@@ -906,25 +852,24 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 		pd_uinfo->first_gd = 0xffffffff;
 		pd_uinfo->num_gd = 0;
 	}
-	if (ctx->is_hash || sg_is_last(dst)) {
+	if (sg_is_last(dst)) {
 		/*
 		 * we know the application gave us dst as one whole piece of
 		 * memory; no need to use the scatter ring.
-		 * In case of is_hash, the icv is always at end of src data.
 		 */
 		pd_uinfo->using_sd = 0;
 		pd_uinfo->first_sd = 0xffffffff;
 		pd_uinfo->num_sd = 0;
 		pd_uinfo->dest_va = dst;
 		sa->sa_command_0.bf.scatter = 0;
-		if (ctx->is_hash)
-			pd->dest = virt_to_phys((void *)dst);
-		else
-			pd->dest = (u32)dma_map_page(dev->core_dev->device,
-					sg_page(dst), dst->offset,
-					dst->length, DMA_TO_DEVICE);
+		pd->dest = (u32)dma_map_page(dev->core_dev->device,
+					     sg_page(dst), dst->offset,
+					     min(datalen, dst->length),
+					     DMA_TO_DEVICE);
 	} else {
+		dma_addr_t sd_dma;
 		struct ce_sd *sd = NULL;
+
 		u32 sd_idx = fst_sd;
 		nbytes = datalen;
 		sa->sa_command_0.bf.scatter = 1;
@@ -938,7 +883,6 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 		sd->ctl.done = 0;
 		sd->ctl.rdy = 1;
 		/* sd->ptr should be setup by sd_init routine*/
-		idx = 0;
 		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
 			nbytes -= PPC4XX_SD_BUFFER_SIZE;
 		else
@@ -949,67 +893,97 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
 			/* setup scatter descriptor */
 			sd->ctl.done = 0;
 			sd->ctl.rdy = 1;
-			if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
 				nbytes -= PPC4XX_SD_BUFFER_SIZE;
-			else
+			} else {
 				/*
 				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
 				 * which is more than nbytes, so done.
 				 */
 				nbytes = 0;
+			}
 		}
 	}
 
-	sa->sa_command_1.bf.hash_crypto_offset = 0;
-	pd->pd_ctl.w = ctx->pd_ctl;
-	pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
-	pd_uinfo->state = PD_ENTRY_INUSE;
+	pd->pd_ctl.w = PD_CTL_HOST_READY |
+		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
+		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+			PD_CTL_HASH_FINAL : 0);
+	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
+	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+
 	wmb();
 	/* write any value to push engine to read a pd */
+	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
 	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
-	return -EINPROGRESS;
+	return is_busy ? -EBUSY : -EINPROGRESS;
 }
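
With the PD_ENTRY_BUSY plumbing above, crypto4xx_build_pd() now follows the standard crypto API backlog convention. A sketch of the contract a caller sees; the helper is hypothetical, but the behavior follows from the completion paths earlier in this file:

static int example_submit(struct crypto_async_request *base, int ret)
{
	switch (ret) {
	case -EINPROGRESS:
		/* accepted; the completion callback fires exactly once */
		return 0;
	case -EBUSY:
		if (base->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
			/*
			 * Still queued: the callback first reports
			 * -EINPROGRESS when the engine picks the request
			 * up, then delivers the final status.
			 */
			return 0;
		}
		/* rejected; the no-backlog reserve is exhausted, retry later */
		return -EBUSY;
	default:
		return ret;
	}
}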
 
 /**
  * Algorithm Registration Functions
  */
-static int crypto4xx_alg_init(struct crypto_tfm *tfm)
+static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
+			       struct crypto4xx_ctx *ctx)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
-	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
 	ctx->dev = amcc_alg->dev;
 	ctx->sa_in = NULL;
 	ctx->sa_out = NULL;
-	ctx->sa_in_dma_addr = 0;
-	ctx->sa_out_dma_addr = 0;
 	ctx->sa_len = 0;
+}
 
-	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
-	default:
-		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
-		break;
-	case CRYPTO_ALG_TYPE_AHASH:
-		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-					 sizeof(struct crypto4xx_ctx));
-		break;
-	}
+static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct crypto4xx_alg *amcc_alg;
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
+	crypto4xx_ctx_init(amcc_alg, ctx);
+	tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
 	return 0;
 }
 
-static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
+static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
 {
-	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
 	crypto4xx_free_sa(ctx);
-	crypto4xx_free_state_record(ctx);
 }
 
-int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
-			   struct crypto4xx_alg_common *crypto_alg,
-			   int array_size)
+static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
+{
+	crypto4xx_common_exit(crypto_tfm_ctx(tfm));
+}
+
+static int crypto4xx_aead_init(struct crypto_aead *tfm)
+{
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto4xx_alg *amcc_alg;
+
+	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
+						CRYPTO_ALG_NEED_FALLBACK |
+						CRYPTO_ALG_ASYNC);
+	if (IS_ERR(ctx->sw_cipher.aead))
+		return PTR_ERR(ctx->sw_cipher.aead);
+
+	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
+	crypto4xx_ctx_init(amcc_alg, ctx);
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+				max(sizeof(struct crypto4xx_ctx), 32 +
+				crypto_aead_reqsize(ctx->sw_cipher.aead)));
+	return 0;
+}
+
+static void crypto4xx_aead_exit(struct crypto_aead *tfm)
+{
+	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto4xx_common_exit(ctx);
+	crypto_free_aead(ctx->sw_cipher.aead);
+}
+
+static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+				  struct crypto4xx_alg_common *crypto_alg,
+				  int array_size)
 {
 	struct crypto4xx_alg *alg;
 	int i;
@@ -1024,6 +998,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
 		alg->dev = sec_dev;
 
 		switch (alg->alg.type) {
+		case CRYPTO_ALG_TYPE_AEAD:
+			rc = crypto_register_aead(&alg->alg.u.aead);
+			break;
+
 		case CRYPTO_ALG_TYPE_AHASH:
 			rc = crypto_register_ahash(&alg->alg.u.hash);
 			break;
@@ -1033,12 +1011,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
 			break;
 		}
 
-		if (rc) {
-			list_del(&alg->entry);
+		if (rc)
 			kfree(alg);
-		} else {
+		else
 			list_add_tail(&alg->entry, &sec_dev->alg_list);
-		}
 	}
 
 	return 0;
@@ -1055,6 +1031,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
 			crypto_unregister_ahash(&alg->alg.u.hash);
 			break;
 
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_aead(&alg->alg.u.aead);
+			break;
+
 		default:
 			crypto_unregister_alg(&alg->alg.u.cipher);
 		}
@@ -1068,25 +1048,23 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
 	struct pd_uinfo *pd_uinfo;
 	struct ce_pd *pd;
-	u32 tail;
-
-	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
-		tail = core_dev->dev->pdr_tail;
-		pd_uinfo = core_dev->dev->pdr_uinfo +
-			sizeof(struct pd_uinfo)*tail;
-		pd =  core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
-		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
-				   pd->pd_ctl.bf.pe_done &&
-				   !pd->pd_ctl.bf.host_ready) {
-			pd->pd_ctl.bf.pe_done = 0;
+	u32 tail = core_dev->dev->pdr_tail;
+	u32 head = core_dev->dev->pdr_head;
+
+	do {
+		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+		pd = &core_dev->dev->pdr[tail];
+		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+		     ((READ_ONCE(pd->pd_ctl.w) &
+		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
+		       PD_CTL_PE_DONE)) {
 			crypto4xx_pd_done(core_dev->dev, tail);
-			crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
-			pd_uinfo->state = PD_ENTRY_FREE;
+			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
 		} else {
 			/* if tail not done, break */
 			break;
 		}
-	}
+	} while (head != tail);
 }
 
 /**
@@ -1110,18 +1088,20 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
 /**
  * Supported Crypto Algorithms
  */
-struct crypto4xx_alg_common crypto4xx_alg[] = {
+static struct crypto4xx_alg_common crypto4xx_alg[] = {
 	/* Crypto AES modes */
 	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
 		.cra_name 	= "cbc(aes)",
 		.cra_driver_name = "cbc-aes-ppc4xx",
 		.cra_priority 	= CRYPTO4XX_CRYPTO_PRIORITY,
-		.cra_flags 	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.cra_blocksize 	= AES_BLOCK_SIZE,
 		.cra_ctxsize 	= sizeof(struct crypto4xx_ctx),
 		.cra_type 	= &crypto_ablkcipher_type,
-		.cra_init	= crypto4xx_alg_init,
-		.cra_exit	= crypto4xx_alg_exit,
+		.cra_init	= crypto4xx_ablk_init,
+		.cra_exit	= crypto4xx_ablk_exit,
 		.cra_module 	= THIS_MODULE,
 		.cra_u 		= {
 			.ablkcipher = {
@@ -1134,6 +1114,147 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
 			}
 		}
 	}},
+	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+		.cra_name	= "cfb(aes)",
+		.cra_driver_name = "cfb-aes-ppc4xx",
+		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_init	= crypto4xx_ablk_init,
+		.cra_exit	= crypto4xx_ablk_exit,
+		.cra_module	= THIS_MODULE,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.ivsize		= AES_IV_SIZE,
+				.setkey		= crypto4xx_setkey_aes_cfb,
+				.encrypt	= crypto4xx_encrypt,
+				.decrypt	= crypto4xx_decrypt,
+			}
+		}
+	} },
+	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+		.cra_name	= "rfc3686(ctr(aes))",
+		.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_init	= crypto4xx_ablk_init,
+		.cra_exit	= crypto4xx_ablk_exit,
+		.cra_module	= THIS_MODULE,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE +
+						  CTR_RFC3686_NONCE_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE +
+						  CTR_RFC3686_NONCE_SIZE,
+				.ivsize		= CTR_RFC3686_IV_SIZE,
+				.setkey		= crypto4xx_setkey_rfc3686,
+				.encrypt	= crypto4xx_rfc3686_encrypt,
+				.decrypt	= crypto4xx_rfc3686_decrypt,
+			}
+		}
+	} },
+	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+		.cra_name	= "ecb(aes)",
+		.cra_driver_name = "ecb-aes-ppc4xx",
+		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_init	= crypto4xx_ablk_init,
+		.cra_exit	= crypto4xx_ablk_exit,
+		.cra_module	= THIS_MODULE,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= crypto4xx_setkey_aes_ecb,
+				.encrypt	= crypto4xx_encrypt,
+				.decrypt	= crypto4xx_decrypt,
+			}
+		}
+	} },
+	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+		.cra_name	= "ofb(aes)",
+		.cra_driver_name = "ofb-aes-ppc4xx",
+		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_init	= crypto4xx_ablk_init,
+		.cra_exit	= crypto4xx_ablk_exit,
+		.cra_module	= THIS_MODULE,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.ivsize		= AES_IV_SIZE,
+				.setkey		= crypto4xx_setkey_aes_ofb,
+				.encrypt	= crypto4xx_encrypt,
+				.decrypt	= crypto4xx_decrypt,
+			}
+		}
+	} },
+
+	/* AEAD */
+	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+		.setkey		= crypto4xx_setkey_aes_ccm,
+		.setauthsize	= crypto4xx_setauthsize_aead,
+		.encrypt	= crypto4xx_encrypt_aes_ccm,
+		.decrypt	= crypto4xx_decrypt_aes_ccm,
+		.init		= crypto4xx_aead_init,
+		.exit		= crypto4xx_aead_exit,
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize    = 16,
+		.base = {
+			.cra_name	= "ccm(aes)",
+			.cra_driver_name = "ccm-aes-ppc4xx",
+			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags	= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize	= 1,
+			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+			.cra_module	= THIS_MODULE,
+		},
+	} },
+	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+		.setkey		= crypto4xx_setkey_aes_gcm,
+		.setauthsize	= crypto4xx_setauthsize_aead,
+		.encrypt	= crypto4xx_encrypt_aes_gcm,
+		.decrypt	= crypto4xx_decrypt_aes_gcm,
+		.init		= crypto4xx_aead_init,
+		.exit		= crypto4xx_aead_exit,
+		.ivsize		= GCM_AES_IV_SIZE,
+		.maxauthsize	= 16,
+		.base = {
+			.cra_name	= "gcm(aes)",
+			.cra_driver_name = "gcm-aes-ppc4xx",
+			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags	= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize	= 1,
+			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+			.cra_module	= THIS_MODULE,
+		},
+	} },
 };
 
 /**
@@ -1187,13 +1308,14 @@ static int crypto4xx_probe(struct platform_device *ofdev)
 	core_dev->device = dev;
 	spin_lock_init(&core_dev->lock);
 	INIT_LIST_HEAD(&core_dev->dev->alg_list);
+	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
 	rc = crypto4xx_build_pdr(core_dev->dev);
 	if (rc)
 		goto err_build_pdr;
 
 	rc = crypto4xx_build_gdr(core_dev->dev);
 	if (rc)
-		goto err_build_gdr;
+		goto err_build_pdr;
 
 	rc = crypto4xx_build_sdr(core_dev->dev);
 	if (rc)
@@ -1236,12 +1358,11 @@ err_iomap:
 err_request_irq:
 	irq_dispose_mapping(core_dev->irq);
 	tasklet_kill(&core_dev->tasklet);
-	crypto4xx_destroy_sdr(core_dev->dev);
 err_build_sdr:
+	crypto4xx_destroy_sdr(core_dev->dev);
 	crypto4xx_destroy_gdr(core_dev->dev);
-err_build_gdr:
-	crypto4xx_destroy_pdr(core_dev->dev);
 err_build_pdr:
+	crypto4xx_destroy_pdr(core_dev->dev);
 	kfree(core_dev->dev);
 err_alloc_dev:
 	kfree(core_dev);

+ 118 - 81
drivers/crypto/amcc/crypto4xx_core.h

@@ -22,7 +22,11 @@
 #ifndef __CRYPTO4XX_CORE_H__
 #define __CRYPTO4XX_CORE_H__
 
+#include <linux/ratelimit.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
 
 #define MODULE_NAME "crypto4xx"
 
@@ -34,20 +38,28 @@
 #define PPC405EX_CE_RESET                       0x00000008
 
 #define CRYPTO4XX_CRYPTO_PRIORITY		300
-#define PPC4XX_LAST_PD				63
-#define PPC4XX_NUM_PD				64
-#define PPC4XX_LAST_GD				1023
+#define PPC4XX_NUM_PD				256
+#define PPC4XX_LAST_PD				(PPC4XX_NUM_PD - 1)
 #define PPC4XX_NUM_GD				1024
-#define PPC4XX_LAST_SD				63
-#define PPC4XX_NUM_SD				64
+#define PPC4XX_LAST_GD				(PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD				256
+#define PPC4XX_LAST_SD				(PPC4XX_NUM_SD - 1)
 #define PPC4XX_SD_BUFFER_SIZE			2048
 
-#define PD_ENTRY_INUSE				1
+#define PD_ENTRY_BUSY				BIT(1)
+#define PD_ENTRY_INUSE				BIT(0)
 #define PD_ENTRY_FREE				0
 #define ERING_WAS_FULL				0xffffffff
 
 struct crypto4xx_device;
 
+union shadow_sa_buf {
+	struct dynamic_sa_ctl sa;
+
+	/* alloc 256 bytes which is enough for any kind of dynamic sa */
+	u8 buf[256];
+} __packed;
+
 struct pd_uinfo {
 	struct crypto4xx_device *dev;
 	u32   state;
@@ -60,9 +72,8 @@ struct pd_uinfo {
 				used by this packet */
 	u32 num_sd;		/* number of scatter descriptors
 				used by this packet */
-	void *sa_va;		/* shadow sa, when using cp from ctx->sa */
-	u32 sa_pa;
-	void *sr_va;		/* state record for shadow sa */
+	struct dynamic_sa_ctl *sa_va;	/* shadow sa */
+	struct sa_state_record *sr_va;	/* state record for shadow sa */
 	u32 sr_pa;
 	struct scatterlist *dest_va;
 	struct crypto_async_request *async_req; 	/* base crypto request
@@ -72,27 +83,21 @@ struct pd_uinfo {
 struct crypto4xx_device {
 	struct crypto4xx_core_device *core_dev;
 	char *name;
-	u64  ce_phy_address;
 	void __iomem *ce_base;
 	void __iomem *trng_base;
 
-	void *pdr;			/* base address of packet
-					descriptor ring */
-	dma_addr_t pdr_pa;		/* physical address used to
-					program ce pdr_base_register */
-	void *gdr;                      /* gather descriptor ring */
-	dma_addr_t gdr_pa;		/* physical address used to
-					program ce gdr_base_register */
-	void *sdr;			/* scatter descriptor ring */
-	dma_addr_t sdr_pa;		/* physical address used to
-					program ce sdr_base_register */
+	struct ce_pd *pdr;	/* base address of packet descriptor ring */
+	dma_addr_t pdr_pa;	/* physical address of pdr_base_register */
+	struct ce_gd *gdr;	/* gather descriptor ring */
+	dma_addr_t gdr_pa;	/* physical address of gdr_base_register */
+	struct ce_sd *sdr;	/* scatter descriptor ring */
+	dma_addr_t sdr_pa;	/* physical address of sdr_base_register */
 	void *scatter_buffer_va;
 	dma_addr_t scatter_buffer_pa;
-	u32 scatter_buffer_size;
 
-	void *shadow_sa_pool;		/* pool of memory for sa in pd_uinfo */
+	union shadow_sa_buf *shadow_sa_pool;
 	dma_addr_t shadow_sa_pool_pa;
-	void *shadow_sr_pool;		/* pool of memory for sr in pd_uinfo */
+	struct sa_state_record *shadow_sr_pool;
 	dma_addr_t shadow_sr_pool_pa;
 	u32 pdr_tail;
 	u32 pdr_head;
@@ -100,9 +105,10 @@ struct crypto4xx_device {
 	u32 gdr_head;
 	u32 sdr_tail;
 	u32 sdr_head;
-	void *pdr_uinfo;
+	struct pd_uinfo *pdr_uinfo;
 	struct list_head alg_list;	/* List of algorithms supported
 					by this device */
+	struct ratelimit_state aead_ratelimit;
 };
 
 struct crypto4xx_core_device {
@@ -118,30 +124,13 @@ struct crypto4xx_core_device {
 
 struct crypto4xx_ctx {
 	struct crypto4xx_device *dev;
-	void *sa_in;
-	dma_addr_t sa_in_dma_addr;
-	void *sa_out;
-	dma_addr_t sa_out_dma_addr;
-	void *state_record;
-	dma_addr_t state_record_dma_addr;
+	struct dynamic_sa_ctl *sa_in;
+	struct dynamic_sa_ctl *sa_out;
+	__le32 iv_nonce;
 	u32 sa_len;
-	u32 offset_to_sr_ptr;           /* offset to state ptr, in dynamic sa */
-	u32 direction;
-	u32 next_hdr;
-	u32 save_iv;
-	u32 pd_ctl_len;
-	u32 pd_ctl;
-	u32 bypass;
-	u32 is_hash;
-	u32 hash_final;
-};
-
-struct crypto4xx_req_ctx {
-	struct crypto4xx_device *dev;	/* Device in which
-					operation to send to */
-	void *sa;
-	u32 sa_dma_addr;
-	u16 sa_len;
+	union {
+		struct crypto_aead *aead;
+	} sw_cipher;
 };
 
 struct crypto4xx_alg_common {
@@ -149,6 +138,7 @@ struct crypto4xx_alg_common {
 	union {
 		struct crypto_alg cipher;
 		struct ahash_alg hash;
+		struct aead_alg aead;
 	} u;
 };
 
@@ -158,43 +148,90 @@ struct crypto4xx_alg {
 	struct crypto4xx_device *dev;
 };
 
-static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
-	struct crypto_alg *x)
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+int crypto4xx_build_pd(struct crypto_async_request *req,
+		       struct crypto4xx_ctx *ctx,
+		       struct scatterlist *src,
+		       struct scatterlist *dst,
+		       const unsigned int datalen,
+		       const __le32 *iv, const u32 iv_len,
+		       const struct dynamic_sa_ctl *sa,
+		       const unsigned int sa_len,
+		       const unsigned int assoclen);
+int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt(struct ablkcipher_request *req);
+int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+int crypto4xx_hash_digest(struct ahash_request *req);
+int crypto4xx_hash_final(struct ahash_request *req);
+int crypto4xx_hash_update(struct ahash_request *req);
+int crypto4xx_hash_init(struct ahash_request *req);
+
+/**
+ * Note: Only use this function to copy items that are word aligned.
+ */
+static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
+					   size_t len)
 {
-	switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_AHASH:
-		return container_of(__crypto_ahash_alg(x),
-				    struct crypto4xx_alg, alg.u.hash);
+	for (; len >= 4; buf += 4, len -= 4)
+		*dst++ = __swab32p((u32 *) buf);
+
+	if (len) {
+		const u8 *tmp = (u8 *)buf;
+
+		switch (len) {
+		case 3:
+			*dst = (tmp[2] << 16) |
+			       (tmp[1] << 8) |
+			       tmp[0];
+			break;
+		case 2:
+			*dst = (tmp[1] << 8) |
+			       tmp[0];
+			break;
+		case 1:
+			*dst = tmp[0];
+			break;
+		default:
+			break;
+		}
 	}
+}
 
-	return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
+					      size_t len)
+{
+	crypto4xx_memcpy_swab32(dst, buf, len);
 }
 
-extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
-extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
-				   struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
-extern void crypto4xx_memcpy_le(unsigned int *dst,
-				const unsigned char *buf, int len);
-extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
-			      struct crypto4xx_ctx *ctx,
-			      struct scatterlist *src,
-			      struct scatterlist *dst,
-			      unsigned int datalen,
-			      void *iv, u32 iv_len);
-extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
-				    const u8 *key, unsigned int keylen);
-extern int crypto4xx_encrypt(struct ablkcipher_request *req);
-extern int crypto4xx_decrypt(struct ablkcipher_request *req);
-extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-extern int crypto4xx_hash_digest(struct ahash_request *req);
-extern int crypto4xx_hash_final(struct ahash_request *req);
-extern int crypto4xx_hash_update(struct ahash_request *req);
-extern int crypto4xx_hash_init(struct ahash_request *req);
+static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
+					    size_t len)
+{
+	crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+			       unsigned int authsize);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+
 #endif
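
A worked illustration of the byte-swapping helper above (assuming a big-endian PPC4xx host; the values are derived from the swab loop, not from hardware documentation):

	/*
	 * crypto4xx_memcpy_to_le32(out, key, 6) with
	 *   key[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 }
	 * yields
	 *   out[0] == 0x04030201	(full word, byte-swapped)
	 *   out[1] == 0x00000605	(2-byte tail: (tmp[1] << 8) | tmp[0])
	 */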

+ 3 - 0
drivers/crypto/amcc/crypto4xx_reg_def.h

@@ -261,6 +261,9 @@ union ce_pd_ctl {
 	} bf;
 	u32 w;
 } __attribute__((packed));
+#define PD_CTL_HASH_FINAL	BIT(4)
+#define PD_CTL_PE_DONE		BIT(1)
+#define PD_CTL_HOST_READY	BIT(0)
 
 union ce_pd_ctl_len {
 	struct {

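These bits encode the descriptor handshake that the reworked tasklet in crypto4xx_core.c polls for. Condensed into a predicate (a sketch mirroring the check in crypto4xx_bh_tasklet_cb(); the helper name is hypothetical):

static inline bool example_pd_is_done(union ce_pd_ctl *ctl)
{
	u32 w = READ_ONCE(ctl->w);

	/* the engine is done and the host-ready bit has been consumed */
	return (w & (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) == PD_CTL_PE_DONE;
}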
+ 0 - 85
drivers/crypto/amcc/crypto4xx_sa.c

@@ -1,85 +0,0 @@
-/**
- * AMCC SoC PPC4xx Crypto Driver
- *
- * Copyright (c) 2008 Applied Micro Circuits Corporation.
- * All rights reserved. James Hsiao <jhsiao@amcc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * @file crypto4xx_sa.c
- *
- * This file implements the security context
- * associate format.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock_types.h>
-#include <linux/highmem.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
-#include "crypto4xx_core.h"
-
-u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
-{
-	u32 offset;
-	union dynamic_sa_contents cts;
-
-	if (ctx->direction == DIR_INBOUND)
-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
-	else
-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
-	offset = cts.bf.key_size
-		+ cts.bf.inner_size
-		+ cts.bf.outer_size
-		+ cts.bf.spi
-		+ cts.bf.seq_num0
-		+ cts.bf.seq_num1
-		+ cts.bf.seq_num_mask0
-		+ cts.bf.seq_num_mask1
-		+ cts.bf.seq_num_mask2
-		+ cts.bf.seq_num_mask3
-		+ cts.bf.iv0
-		+ cts.bf.iv1
-		+ cts.bf.iv2
-		+ cts.bf.iv3;
-
-	return sizeof(struct dynamic_sa_ctl) + offset * 4;
-}
-
-u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
-{
-	union dynamic_sa_contents cts;
-
-	if (ctx->direction == DIR_INBOUND)
-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
-	else
-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
-	return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
-}
-
-u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
-{
-	union dynamic_sa_contents cts;
-
-	if (ctx->direction == DIR_INBOUND)
-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
-	else
-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
-
-	return sizeof(struct dynamic_sa_ctl);
-}

+ 87 - 12
drivers/crypto/amcc/crypto4xx_sa.h

@@ -55,6 +55,8 @@ union dynamic_sa_contents {
 #define SA_OP_GROUP_BASIC			0
 #define SA_OPCODE_ENCRYPT			0
 #define SA_OPCODE_DECRYPT			0
+#define SA_OPCODE_ENCRYPT_HASH			1
+#define SA_OPCODE_HASH_DECRYPT			1
 #define SA_OPCODE_HASH				3
 #define SA_CIPHER_ALG_DES			0
 #define SA_CIPHER_ALG_3DES			1
@@ -65,6 +67,8 @@ union dynamic_sa_contents {
 
 #define SA_HASH_ALG_MD5				0
 #define SA_HASH_ALG_SHA1			1
+#define SA_HASH_ALG_GHASH			12
+#define SA_HASH_ALG_CBC_MAC			14
 #define SA_HASH_ALG_NULL			15
 #define SA_HASH_ALG_SHA1_DIGEST_SIZE		20
 
@@ -112,6 +116,9 @@ union sa_command_0 {
 
 #define CRYPTO_MODE_ECB				0
 #define CRYPTO_MODE_CBC				1
+#define CRYPTO_MODE_OFB				2
+#define CRYPTO_MODE_CFB				3
+#define CRYPTO_MODE_CTR				4
 
 #define CRYPTO_FEEDBACK_MODE_NO_FB		0
 #define CRYPTO_FEEDBACK_MODE_64BIT_OFB		0
@@ -169,7 +176,7 @@ union sa_command_1 {
 } __attribute__((packed));
 
 struct dynamic_sa_ctl {
-	u32 sa_contents;
+	union dynamic_sa_contents sa_contents;
 	union sa_command_0 sa_command_0;
 	union sa_command_1 sa_command_1;
 } __attribute__((packed));
@@ -178,9 +185,12 @@ struct dynamic_sa_ctl {
  * State Record for Security Association (SA)
  */
 struct  sa_state_record {
-	u32 save_iv[4];
-	u32 save_hash_byte_cnt[2];
-	u32 save_digest[16];
+	__le32 save_iv[4];
+	__le32 save_hash_byte_cnt[2];
+	union {
+		u32 save_digest[16]; /* for MD5/SHA */
+		__le32 save_digest_le32[16]; /* GHASH / CBC */
+	};
 } __attribute__((packed));
 
 /**
@@ -189,8 +199,8 @@ struct  sa_state_record {
  */
 struct dynamic_sa_aes128 {
 	struct dynamic_sa_ctl	ctrl;
-	u32 key[4];
-	u32 iv[4]; /* for CBC, OFC, and CFB mode */
+	__le32 key[4];
+	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
 	u32 state_ptr;
 	u32 reserved;
 } __attribute__((packed));
@@ -203,8 +213,8 @@ struct dynamic_sa_aes128 {
  */
 struct dynamic_sa_aes192 {
 	struct dynamic_sa_ctl ctrl;
-	u32 key[6];
-	u32 iv[4]; /* for CBC, OFC, and CFB mode */
+	__le32 key[6];
+	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
 	u32 state_ptr;
 	u32 reserved;
 } __attribute__((packed));
@@ -217,8 +227,8 @@ struct dynamic_sa_aes192 {
  */
 struct dynamic_sa_aes256 {
 	struct dynamic_sa_ctl ctrl;
-	u32 key[8];
-	u32 iv[4]; /* for CBC, OFC, and CFB mode */
+	__le32 key[8];
+	__le32 iv[4]; /* for CBC, OFB, and CFB mode */
 	u32 state_ptr;
 	u32 reserved;
 } __attribute__((packed));
@@ -227,17 +237,82 @@ struct dynamic_sa_aes256 {
 #define SA_AES256_CONTENTS	0x3e000082
 #define SA_AES_CONTENTS		0x3e000002
 
+/**
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+	struct dynamic_sa_ctl ctrl;
+	__le32 key[4];
+	__le32 iv[4];
+	u32 state_ptr;
+	u32 reserved;
+} __packed;
+#define SA_AES128_CCM_LEN	(sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS	0x3e000042
+#define SA_AES_CCM_CONTENTS	0x3e000002
+
+/**
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+	struct dynamic_sa_ctl ctrl;
+	__le32 key[4];
+	__le32 inner_digest[4];
+	__le32 iv[4];
+	u32 state_ptr;
+	u32 reserved;
+} __packed;
+
+#define SA_AES128_GCM_LEN	(sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS	0x3e000442
+#define SA_AES_GCM_CONTENTS	0x3e000402
+
 /**
  * Security Association (SA) for HASH160: HMAC-SHA1
  */
 struct dynamic_sa_hash160 {
 	struct dynamic_sa_ctl ctrl;
-	u32 inner_digest[5];
-	u32 outer_digest[5];
+	__le32 inner_digest[5];
+	__le32 outer_digest[5];
 	u32 state_ptr;
 	u32 reserved;
 } __attribute__((packed));
 #define SA_HASH160_LEN		(sizeof(struct dynamic_sa_hash160)/4)
 #define SA_HASH160_CONTENTS     0x2000a502
 
+static inline u32
+get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
+{
+	u32 offset;
+
+	offset = cts->sa_contents.bf.key_size
+		+ cts->sa_contents.bf.inner_size
+		+ cts->sa_contents.bf.outer_size
+		+ cts->sa_contents.bf.spi
+		+ cts->sa_contents.bf.seq_num0
+		+ cts->sa_contents.bf.seq_num1
+		+ cts->sa_contents.bf.seq_num_mask0
+		+ cts->sa_contents.bf.seq_num_mask1
+		+ cts->sa_contents.bf.seq_num_mask2
+		+ cts->sa_contents.bf.seq_num_mask3
+		+ cts->sa_contents.bf.iv0
+		+ cts->sa_contents.bf.iv1
+		+ cts->sa_contents.bf.iv2
+		+ cts->sa_contents.bf.iv3;
+
+	return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+{
+	return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+}
+
+static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
+{
+	return (__le32 *) ((unsigned long)cts +
+		sizeof(struct dynamic_sa_ctl) +
+		cts->sa_contents.bf.key_size * 4);
+}
+
 #endif

+ 44 - 36
drivers/crypto/atmel-aes.c

@@ -36,6 +36,7 @@
 #include <crypto/scatterwalk.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <crypto/internal/aead.h>
 #include <linux/platform_data/crypto-atmel.h>
@@ -76,12 +77,11 @@
 				 AES_FLAGS_ENCRYPT |		\
 				 AES_FLAGS_GTAGEN)
 
-#define AES_FLAGS_INIT		BIT(2)
 #define AES_FLAGS_BUSY		BIT(3)
 #define AES_FLAGS_DUMP_REG	BIT(4)
 #define AES_FLAGS_OWN_SHA	BIT(5)
 
-#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)
+#define AES_FLAGS_PERSISTENT	AES_FLAGS_BUSY
 
 #define ATMEL_AES_QUEUE_LENGTH	50
 
@@ -110,6 +110,7 @@ struct atmel_aes_base_ctx {
 	int			keylen;
 	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
 	u16			block_size;
+	bool			is_aead;
 };
 
 struct atmel_aes_ctx {
@@ -156,6 +157,7 @@ struct atmel_aes_authenc_ctx {
 
 struct atmel_aes_reqctx {
 	unsigned long		mode;
+	u32			lastc[AES_BLOCK_SIZE / sizeof(u32)];
 };
 
 #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
@@ -448,11 +450,8 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 	if (err)
 		return err;
 
-	if (!(dd->flags & AES_FLAGS_INIT)) {
-		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
-		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
-		dd->flags |= AES_FLAGS_INIT;
-	}
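+	/*
+	 * Reset the block unconditionally so that no state is carried
+	 * over from a previous, possibly failed, request.
+	 */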
+	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
+	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
 
 	return 0;
 }
@@ -497,12 +496,34 @@ static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 {
 #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
-	atmel_aes_authenc_complete(dd, err);
+	if (dd->ctx->is_aead)
+		atmel_aes_authenc_complete(dd, err);
 #endif
 
 	clk_disable(dd->iclk);
 	dd->flags &= ~AES_FLAGS_BUSY;
 
+	if (!dd->ctx->is_aead) {
+		struct ablkcipher_request *req =
+			ablkcipher_request_cast(dd->areq);
+		struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+		struct crypto_ablkcipher *ablkcipher =
+			crypto_ablkcipher_reqtfm(req);
+		int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
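+		/*
+		 * Copy the next chaining IV back to req->info: the last
+		 * ciphertext block comes from dst on encryption, from src
+		 * on decryption, or from rctx->lastc when the request ran
+		 * in place and src has already been overwritten.
+		 */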
+		if (rctx->mode & AES_FLAGS_ENCRYPT) {
+			scatterwalk_map_and_copy(req->info, req->dst,
+				req->nbytes - ivsize, ivsize, 0);
+		} else {
+			if (req->src == req->dst) {
+				memcpy(req->info, rctx->lastc, ivsize);
+			} else {
+				scatterwalk_map_and_copy(req->info, req->src,
+					req->nbytes - ivsize, ivsize, 0);
+			}
+		}
+	}
+
 	if (dd->is_async)
 		dd->areq->complete(dd->areq, err);
 
@@ -1071,11 +1092,11 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
 
 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
-	struct atmel_aes_base_ctx *ctx;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct atmel_aes_reqctx *rctx;
 	struct atmel_aes_dev *dd;
 
-	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 	switch (mode & AES_FLAGS_OPMODE_MASK) {
 	case AES_FLAGS_CFB8:
 		ctx->block_size = CFB8_BLOCK_SIZE;
@@ -1097,6 +1118,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 		ctx->block_size = AES_BLOCK_SIZE;
 		break;
 	}
+	ctx->is_aead = false;
 
 	dd = atmel_aes_find_dev(ctx);
 	if (!dd)
@@ -1105,6 +1127,13 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 	rctx = ablkcipher_request_ctx(req);
 	rctx->mode = mode;
 
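+	/*
+	 * In-place decryption is about to overwrite the last ciphertext
+	 * block, so save it now: it becomes the output IV on completion.
+	 */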
+	if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+		int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+		scatterwalk_map_and_copy(rctx->lastc, req->src,
+			(req->nbytes - ivsize), ivsize, 0);
+	}
+
 	return atmel_aes_handle_queue(dd, &req->base);
 }
 
@@ -1236,10 +1265,6 @@ static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
 static struct crypto_alg aes_algs[] = {
 {
 	.cra_name		= "ecb(aes)",
@@ -1252,7 +1277,6 @@ static struct crypto_alg aes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1272,7 +1296,6 @@ static struct crypto_alg aes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1293,7 +1316,6 @@ static struct crypto_alg aes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1314,7 +1336,6 @@ static struct crypto_alg aes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1335,7 +1356,6 @@ static struct crypto_alg aes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1356,7 +1376,6 @@ static struct crypto_alg aes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1377,7 +1396,6 @@ static struct crypto_alg aes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1398,7 +1416,6 @@ static struct crypto_alg aes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_ctr_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1421,7 +1438,6 @@ static struct crypto_alg aes_cfb64_alg = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
@@ -1532,7 +1548,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
 	if (err)
 		return atmel_aes_complete(dd, err);
 
-	if (likely(ivsize == 12)) {
+	if (likely(ivsize == GCM_AES_IV_SIZE)) {
 		memcpy(ctx->j0, iv, ivsize);
 		ctx->j0[3] = cpu_to_be32(1);
 		return atmel_aes_gcm_process(dd);
@@ -1739,6 +1755,7 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,
 
 	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
 	ctx->block_size = AES_BLOCK_SIZE;
+	ctx->is_aead = true;
 
 	dd = atmel_aes_find_dev(ctx);
 	if (!dd)
@@ -1808,19 +1825,13 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm)
 	return 0;
 }
 
-static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
-{
-
-}
-
 static struct aead_alg aes_gcm_alg = {
 	.setkey		= atmel_aes_gcm_setkey,
 	.setauthsize	= atmel_aes_gcm_setauthsize,
 	.encrypt	= atmel_aes_gcm_encrypt,
 	.decrypt	= atmel_aes_gcm_decrypt,
 	.init		= atmel_aes_gcm_init,
-	.exit		= atmel_aes_gcm_exit,
-	.ivsize		= 12,
+	.ivsize		= GCM_AES_IV_SIZE,
 	.maxauthsize	= AES_BLOCK_SIZE,
 
 	.base = {
@@ -1955,7 +1966,6 @@ static struct crypto_alg aes_xts_alg = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_aes_xts_cra_init,
-	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
@@ -2223,6 +2233,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
 
 	rctx->base.mode = mode;
 	ctx->block_size = AES_BLOCK_SIZE;
+	ctx->is_aead = true;
 
 	dd = atmel_aes_find_dev(ctx);
 	if (!dd)
@@ -2382,7 +2393,6 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
 			      struct crypto_platform_data *pdata)
 {
 	struct at_dma_slave *slave;
-	int err = -ENOMEM;
 	dma_cap_mask_t mask;
 
 	dma_cap_zero(mask);
@@ -2407,7 +2417,7 @@ err_dma_out:
 	dma_release_channel(dd->src.chan);
 err_dma_in:
 	dev_warn(dd->dev, "no DMA channel available\n");
-	return err;
+	return -ENODEV;
 }
 
 static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
@@ -2658,8 +2668,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
 
 	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
 
-	aes_dd->irq = -1;
-
 	/* Get the base address */
 	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!aes_res) {

+ 1 - 4
drivers/crypto/atmel-sha.c

@@ -2628,7 +2628,6 @@ static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
 static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
 				struct crypto_platform_data *pdata)
 {
-	int err = -ENOMEM;
 	dma_cap_mask_t mask_in;
 
 	/* Try to grab DMA channel */
@@ -2639,7 +2638,7 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
 			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
 	if (!dd->dma_lch_in.chan) {
 		dev_warn(dd->dev, "no DMA channel available\n");
-		return err;
+		return -ENODEV;
 	}
 
 	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
@@ -2778,8 +2777,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
 
 	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
 
-	sha_dd->irq = -1;
-
 	/* Get the base address */
 	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!sha_res) {

+ 1 - 22
drivers/crypto/atmel-tdes.c

@@ -720,7 +720,6 @@ static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
 static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
 			struct crypto_platform_data *pdata)
 {
-	int err = -ENOMEM;
 	dma_cap_mask_t mask;
 
 	dma_cap_zero(mask);
@@ -765,7 +764,7 @@ err_dma_out:
 	dma_release_channel(dd->dma_lch_in.chan);
 err_dma_in:
 	dev_warn(dd->dev, "no DMA channel available\n");
-	return err;
+	return -ENODEV;
 }
 
 static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -912,10 +911,6 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static void atmel_tdes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
 static struct crypto_alg tdes_algs[] = {
 {
 	.cra_name		= "ecb(des)",
@@ -928,7 +923,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= DES_KEY_SIZE,
 		.max_keysize	= DES_KEY_SIZE,
@@ -948,7 +942,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= DES_KEY_SIZE,
 		.max_keysize	= DES_KEY_SIZE,
@@ -969,7 +962,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= DES_KEY_SIZE,
 		.max_keysize	= DES_KEY_SIZE,
@@ -990,7 +982,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= DES_KEY_SIZE,
 		.max_keysize	= DES_KEY_SIZE,
@@ -1011,7 +1002,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= DES_KEY_SIZE,
 		.max_keysize	= DES_KEY_SIZE,
@@ -1032,7 +1022,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= DES_KEY_SIZE,
 		.max_keysize	= DES_KEY_SIZE,
@@ -1053,7 +1042,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= DES_KEY_SIZE,
 		.max_keysize	= DES_KEY_SIZE,
@@ -1074,7 +1062,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= 2 * DES_KEY_SIZE,
 		.max_keysize	= 3 * DES_KEY_SIZE,
@@ -1094,7 +1081,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= 2*DES_KEY_SIZE,
 		.max_keysize	= 3*DES_KEY_SIZE,
@@ -1115,7 +1101,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= 2*DES_KEY_SIZE,
 		.max_keysize	= 2*DES_KEY_SIZE,
@@ -1136,7 +1121,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= 2*DES_KEY_SIZE,
 		.max_keysize	= 2*DES_KEY_SIZE,
@@ -1157,7 +1141,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= 2*DES_KEY_SIZE,
 		.max_keysize	= 2*DES_KEY_SIZE,
@@ -1178,7 +1161,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= 2*DES_KEY_SIZE,
 		.max_keysize	= 2*DES_KEY_SIZE,
@@ -1199,7 +1181,6 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
-	.cra_exit		= atmel_tdes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= 2*DES_KEY_SIZE,
 		.max_keysize	= 3*DES_KEY_SIZE,
@@ -1382,8 +1363,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
 
 	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
 
-	tdes_dd->irq = -1;
-
 	/* Get the base address */
 	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!tdes_res) {

+ 52 - 64
drivers/crypto/bcm/cipher.c

@@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
 	return 0;
 }
 
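+/*
+ * Common helper for handing a SPU request to the mailbox framework:
+ * retries on -ENOBUFS when the caller may sleep, then ACKs completion
+ * itself since the client is registered with knows_txdone set.
+ */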
+static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
+				u8 chan_idx)
+{
+	int err;
+	int retry_cnt = 0;
+	struct device *dev = &(iproc_priv.pdev->dev);
+
+	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
+	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
+		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
+			/*
+			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
+			 * not in atomic context and we can wait and try again.
+			 */
+			retry_cnt++;
+			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
+			err = mbox_send_message(iproc_priv.mbox[chan_idx],
+						mssg);
+			atomic_inc(&iproc_priv.mb_no_spc);
+		}
+	}
+	if (err < 0) {
+		atomic_inc(&iproc_priv.mb_send_fail);
+		return err;
+	}
+
+	/* Check error returned by mailbox controller */
+	err = mssg->error;
+	if (unlikely(err < 0))
+		dev_err(dev, "message error %d", err);
+
+	/* Signal txdone for mailbox channel */
+	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
+	return err;
+}
+
 /**
  * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
  * a single SPU request message, starting at the current position in the request
@@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
 	u32 pad_len;		/* total length of all padding */
 	bool update_key = false;
 	struct brcm_message *mssg;	/* mailbox message */
-	int retry_cnt = 0;
 
 	/* number of entries in src and dst sg in mailbox message. */
 	u8 rx_frag_num = 2;	/* response header and STATUS */
@@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (unlikely(err < 0)) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
 
 	return -EINPROGRESS;
 }
@@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
 	u32 spu_hdr_len;
 	unsigned int digestsize;
 	u16 rem = 0;
-	int retry_cnt = 0;
 
 	/*
 	 * number of entries in src and dst sg. Always includes SPU msg header.
@@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (err < 0) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
+
 	return -EINPROGRESS;
 }
 
@@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
 	int assoc_nents = 0;
 	bool incl_icv = false;
 	unsigned int digestsize = ctx->digestsize;
-	int retry_cnt = 0;
 
 	/* number of entries in src and dst sg. Always includes SPU msg header.
 	 */
@@ -1367,11 +1373,11 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
 		 * expects AAD to include just SPI and seqno. So
 		 * subtract off the IV len.
 		 */
-		aead_parms.assoc_size -= GCM_ESP_IV_SIZE;
+		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
 
 		if (rctx->is_encrypt) {
 			aead_parms.return_iv = true;
-			aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
+			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
 			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
 		}
 	} else {
@@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (err < 0) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
 
 	return -EINPROGRESS;
 }
@@ -3255,7 +3246,7 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
 		 },
 		 .setkey = aead_gcm_esp_setkey,
-		 .ivsize = GCM_ESP_IV_SIZE,
+		 .ivsize = GCM_RFC4106_IV_SIZE,
 		 .maxauthsize = AES_BLOCK_SIZE,
 	 },
 	 .cipher_info = {
@@ -3301,7 +3292,7 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
 		 },
 		 .setkey = rfc4543_gcm_esp_setkey,
-		 .ivsize = GCM_ESP_IV_SIZE,
+		 .ivsize = GCM_RFC4106_IV_SIZE,
 		 .maxauthsize = AES_BLOCK_SIZE,
 	 },
 	 .cipher_info = {
@@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev)
 	mcl->dev = dev;
 	mcl->tx_block = false;
 	mcl->tx_tout = 0;
-	mcl->knows_txdone = false;
+	mcl->knows_txdone = true;
 	mcl->rx_callback = spu_rx_callback;
 	mcl->tx_done = NULL;
 
@@ -4818,7 +4809,6 @@ static int spu_dt_read(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct resource *spu_ctrl_regs;
-	const struct of_device_id *match;
 	const struct spu_type_subtype *matched_spu_type;
 	struct device_node *dn = pdev->dev.of_node;
 	int err, i;
@@ -4826,14 +4816,12 @@ static int spu_dt_read(struct platform_device *pdev)
 	/* Count number of mailbox channels */
 	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
 
-	match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
-	if (!match) {
+	matched_spu_type = of_device_get_match_data(dev);
+	if (!matched_spu_type) {
 		dev_err(&pdev->dev, "Failed to match device\n");
 		return -ENODEV;
 	}
 
-	matched_spu_type = match->data;
-
 	spu->spu_type = matched_spu_type->type;
 	spu->spu_subtype = matched_spu_type->subtype;
 

+ 1 - 2
drivers/crypto/bcm/cipher.h

@@ -23,6 +23,7 @@
 #include <crypto/aes.h>
 #include <crypto/internal/hash.h>
 #include <crypto/aead.h>
+#include <crypto/gcm.h>
 #include <crypto/sha.h>
 #include <crypto/sha3.h>
 
@@ -39,8 +40,6 @@
 #define ARC4_STATE_SIZE     4
 
 #define CCM_AES_IV_SIZE    16
-#define GCM_AES_IV_SIZE    12
-#define GCM_ESP_IV_SIZE     8
 #define CCM_ESP_IV_SIZE     8
 #define RFC4543_ICV_SIZE   16
 

+ 7 - 7
drivers/crypto/bcm/util.c

@@ -271,7 +271,7 @@ int do_shash(unsigned char *name, unsigned char *result,
 	hash = crypto_alloc_shash(name, 0, 0);
 	if (IS_ERR(hash)) {
 		rc = PTR_ERR(hash);
-		pr_err("%s: Crypto %s allocation error %d", __func__, name, rc);
+		pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc);
 		return rc;
 	}
 
@@ -279,7 +279,7 @@ int do_shash(unsigned char *name, unsigned char *result,
 	sdesc = kmalloc(size, GFP_KERNEL);
 	if (!sdesc) {
 		rc = -ENOMEM;
-		pr_err("%s: Memory allocation failure", __func__);
+		pr_err("%s: Memory allocation failure\n", __func__);
 		goto do_shash_err;
 	}
 	sdesc->shash.tfm = hash;
@@ -288,31 +288,31 @@ int do_shash(unsigned char *name, unsigned char *result,
 	if (key_len > 0) {
 		rc = crypto_shash_setkey(hash, key, key_len);
 		if (rc) {
-			pr_err("%s: Could not setkey %s shash", __func__, name);
+			pr_err("%s: Could not setkey %s shash\n", __func__, name);
 			goto do_shash_err;
 		}
 	}
 
 	rc = crypto_shash_init(&sdesc->shash);
 	if (rc) {
-		pr_err("%s: Could not init %s shash", __func__, name);
+		pr_err("%s: Could not init %s shash\n", __func__, name);
 		goto do_shash_err;
 	}
 	rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
 	if (rc) {
-		pr_err("%s: Could not update1", __func__);
+		pr_err("%s: Could not update1\n", __func__);
 		goto do_shash_err;
 	}
 	if (data2 && data2_len) {
 		rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
 		if (rc) {
-			pr_err("%s: Could not update2", __func__);
+			pr_err("%s: Could not update2\n", __func__);
 			goto do_shash_err;
 		}
 	}
 	rc = crypto_shash_final(&sdesc->shash, result);
 	if (rc)
-		pr_err("%s: Could not generate %s hash", __func__, name);
+		pr_err("%s: Could not generate %s hash\n", __func__, name);
 
 do_shash_err:
 	crypto_free_shash(hash);

+ 5 - 5
drivers/crypto/caam/caamalg.c

@@ -992,7 +992,7 @@ static void init_gcm_job(struct aead_request *req,
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	u32 *desc = edesc->hw_desc;
-	bool generic_gcm = (ivsize == 12);
+	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
 	unsigned int last;
 
 	init_aead_job(req, edesc, all_contig, encrypt);
@@ -1004,7 +1004,7 @@ static void init_gcm_job(struct aead_request *req,
 
 	/* Read GCM IV */
 	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
-			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
+			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
 	/* Append Salt */
 	if (!generic_gcm)
 		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
@@ -1953,7 +1953,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setauthsize = rfc4106_setauthsize,
 			.encrypt = ipsec_gcm_encrypt,
 			.decrypt = ipsec_gcm_decrypt,
-			.ivsize = 8,
+			.ivsize = GCM_RFC4106_IV_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		.caam = {
@@ -1971,7 +1971,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setauthsize = rfc4543_setauthsize,
 			.encrypt = ipsec_gcm_encrypt,
 			.decrypt = ipsec_gcm_decrypt,
-			.ivsize = 8,
+			.ivsize = GCM_RFC4543_IV_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		.caam = {
@@ -1990,7 +1990,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setauthsize = gcm_setauthsize,
 			.encrypt = gcm_encrypt,
 			.decrypt = gcm_decrypt,
-			.ivsize = 12,
+			.ivsize = GCM_AES_IV_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		.caam = {

+ 6 - 1
drivers/crypto/caam/caamalg_qi.c

@@ -7,7 +7,7 @@
  */
 
 #include "compat.h"
-
+#include "ctrl.h"
 #include "regs.h"
 #include "intern.h"
 #include "desc_constr.h"
@@ -2312,6 +2312,11 @@ static int __init caam_qi_algapi_init(void)
 	if (!priv || !priv->qi_present)
 		return -ENODEV;
 
+	if (caam_dpaa2) {
+		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
+		return -ENODEV;
+	}
+
 	INIT_LIST_HEAD(&alg_list);
 
 	/*

+ 4 - 8
drivers/crypto/caam/caamhash.c

@@ -218,7 +218,7 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
 }
 
 /* Map state->caam_ctx, and add it to link table */
-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+static inline int ctx_map_to_sec4_sg(struct device *jrdev,
 				     struct caam_hash_state *state, int ctx_len,
 				     struct sec4_sg_entry *sec4_sg, u32 flag)
 {
@@ -773,7 +773,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 		edesc->src_nents = src_nents;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 
-		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
 					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
 		if (ret)
 			goto unmap_ctx;
@@ -871,9 +871,8 @@ static int ahash_final_ctx(struct ahash_request *req)
 	desc = edesc->hw_desc;
 
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->src_nents = 0;
 
-	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
 				 edesc->sec4_sg, DMA_TO_DEVICE);
 	if (ret)
 		goto unmap_ctx;
@@ -967,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 
 	edesc->src_nents = src_nents;
 
-	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
 				 edesc->sec4_sg, DMA_TO_DEVICE);
 	if (ret)
 		goto unmap_ctx;
@@ -1123,7 +1122,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 		dev_err(jrdev, "unable to map dst\n");
 		goto unmap;
 	}
-	edesc->src_nents = 0;
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1205,7 +1203,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 		edesc->src_nents = src_nents;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
-		edesc->dst_dma = 0;
 
 		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
 		if (ret)
@@ -1417,7 +1414,6 @@ static int ahash_update_first(struct ahash_request *req)
 		}
 
 		edesc->src_nents = src_nents;
-		edesc->dst_dma = 0;
 
 		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
 					  to_hash);

+ 1 - 0
drivers/crypto/caam/compat.h

@@ -32,6 +32,7 @@
 #include <crypto/aes.h>
 #include <crypto/ctr.h>
 #include <crypto/des.h>
+#include <crypto/gcm.h>
 #include <crypto/sha.h>
 #include <crypto/md5.h>
 #include <crypto/internal/aead.h>

+ 1 - 1
drivers/crypto/caam/desc.h

@@ -1440,7 +1440,7 @@
 #define MATH_SRC1_REG2		(0x02 << MATH_SRC1_SHIFT)
 #define MATH_SRC1_REG3		(0x03 << MATH_SRC1_SHIFT)
 #define MATH_SRC1_IMM		(0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_DPOVRD	(0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC1_DPOVRD	(0x07 << MATH_SRC1_SHIFT)
 #define MATH_SRC1_INFIFO	(0x0a << MATH_SRC1_SHIFT)
 #define MATH_SRC1_OUTFIFO	(0x0b << MATH_SRC1_SHIFT)
 #define MATH_SRC1_ONE		(0x0c << MATH_SRC1_SHIFT)

+ 1 - 1
drivers/crypto/cavium/nitrox/nitrox_hal.c

@@ -127,7 +127,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
 		 * size and interrupt threshold.
 		 */
 		offset = NPS_PKT_IN_INSTR_BADDRX(i);
-		nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BADDRX(i), cmdq->dma);
+		nitrox_write_csr(ndev, offset, cmdq->dma);
 
 		/* configure ring size */
 		offset = NPS_PKT_IN_INSTR_RSIZEX(i);

+ 4 - 5
drivers/crypto/ccp/ccp-crypto-aes-galois.c

@@ -19,13 +19,12 @@
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
 #include <crypto/ctr.h>
+#include <crypto/gcm.h>
 #include <crypto/scatterwalk.h>
 #include <linux/delay.h>
 
 #include "ccp-crypto.h"
 
-#define	AES_GCM_IVSIZE	12
-
 static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
 {
 	return ret;
@@ -95,9 +94,9 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
 	 */
 
 	/* Prepare the IV: 12 bytes + an integer (counter) */
-	memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
+	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	for (i = 0; i < 3; i++)
-		rctx->iv[i + AES_GCM_IVSIZE] = 0;
+		rctx->iv[i + GCM_AES_IV_SIZE] = 0;
 	rctx->iv[AES_BLOCK_SIZE - 1] = 1;
 
 	/* Set up a scatterlist for the IV */
@@ -160,7 +159,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
 	.encrypt = ccp_aes_gcm_encrypt,
 	.decrypt = ccp_aes_gcm_decrypt,
 	.init = ccp_aes_gcm_cra_init,
-	.ivsize = AES_GCM_IVSIZE,
+	.ivsize = GCM_AES_IV_SIZE,
 	.maxauthsize = AES_BLOCK_SIZE,
 	.base = {
 		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |

+ 3 - 5
drivers/crypto/ccp/ccp-crypto-main.c

@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 
 	/* Check if the cmd can/should be queued */
 	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
-		ret = -EBUSY;
-		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
+			ret = -ENOSPC;
 			goto e_lock;
+		}
 	}
 
 	/* Look for an entry with the same tfm.  If there is a cmd
@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
 		if (!ccp_crypto_success(ret))
 			goto e_lock;	/* Error, don't queue it */
-		if ((ret == -EBUSY) &&
-		    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
-			goto e_lock;	/* Not backlogging, don't queue it */
 	}
 
 	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {

+ 1 - 2
drivers/crypto/ccp/ccp-dev-v5.c

@@ -788,13 +788,12 @@ static int ccp5_init(struct ccp_device *ccp)
 	struct ccp_cmd_queue *cmd_q;
 	struct dma_pool *dma_pool;
 	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-	unsigned int qmr, qim, i;
+	unsigned int qmr, i;
 	u64 status;
 	u32 status_lo, status_hi;
 	int ret;
 
 	/* Find available queues */
-	qim = 0;
 	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
 	for (i = 0; i < MAX_HW_QUEUES; i++) {
 

+ 5 - 2
drivers/crypto/ccp/ccp-dev.c

@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 	i = ccp->cmd_q_count;
 
 	if (ccp->cmd_count >= MAX_CMD_QLEN) {
-		ret = -EBUSY;
-		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
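+		/*
+		 * A backlogged command is accepted with -EBUSY; callers
+		 * that refuse backlogging get the unambiguous -ENOSPC.
+		 */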
+		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
+			ret = -EBUSY;
 			list_add_tail(&cmd->entry, &ccp->backlog);
+		} else {
+			ret = -ENOSPC;
+		}
 	} else {
 		ret = -EINPROGRESS;
 		ccp->cmd_count++;

+ 2 - 3
drivers/crypto/ccp/ccp-dmaengine.c

@@ -223,6 +223,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
 				desc->tx_desc.cookie, desc->status);
 
 			dma_cookie_complete(tx_desc);
+			dma_descriptor_unmap(tx_desc);
 		}
 
 		desc = __ccp_next_dma_desc(chan, desc);
@@ -230,9 +231,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
 		spin_unlock_irqrestore(&chan->lock, flags);
 
 		if (tx_desc) {
-			if (tx_desc->callback &&
-			    (tx_desc->flags & DMA_PREP_INTERRUPT))
-				tx_desc->callback(tx_desc->callback_param);
+			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
 
 			dma_run_dependencies(tx_desc);
 		}

+ 1045 - 753
drivers/crypto/chelsio/chcr_algo.c

@@ -53,6 +53,7 @@
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <crypto/hash.h>
+#include <crypto/gcm.h>
 #include <crypto/sha.h>
 #include <crypto/authenc.h>
 #include <crypto/ctr.h>
@@ -70,6 +71,8 @@
 #include "chcr_algo.h"
 #include "chcr_crypto.h"
 
+#define IV AES_BLOCK_SIZE
+
 static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
 {
 	return ctx->crypto_ctx->aeadctx;
@@ -102,7 +105,7 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 
 static inline int is_ofld_imm(const struct sk_buff *skb)
 {
-	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
+	return (skb->len <= SGE_MAX_WR_LEN);
 }
 
 /*
@@ -117,6 +120,92 @@ static inline unsigned int sgl_len(unsigned int n)
 	return (3 * n) / 2 + (n & 1) + 2;
 }
 
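+/*
+ * Count the hardware SG entries needed to cover @reqlen bytes of @sg,
+ * ignoring the first @skip bytes; each entry covers at most @entlen
+ * bytes.
+ */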
+static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
+			 unsigned int entlen,
+			 unsigned int skip)
+{
+	int nents = 0;
+	unsigned int less;
+	unsigned int skip_len = 0;
+
+	while (sg && skip) {
+		if (sg_dma_len(sg) <= skip) {
+			skip -= sg_dma_len(sg);
+			skip_len = 0;
+			sg = sg_next(sg);
+		} else {
+			skip_len = skip;
+			skip = 0;
+		}
+	}
+
+	while (sg && reqlen) {
+		less = min(reqlen, sg_dma_len(sg) - skip_len);
+		nents += DIV_ROUND_UP(less, entlen);
+		reqlen -= less;
+		skip_len = 0;
+		sg = sg_next(sg);
+	}
+	return nents;
+}
+
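+/*
+ * Hash completion path: unmap DMA buffers, then copy out either the
+ * final digest or the partial state.  SHA-224/SHA-384 carry the full
+ * SHA-256/SHA-512 internal state between requests, hence the wider
+ * updated_digestsize.
+ */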
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+					  unsigned char *input,
+					  int err)
+{
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	int digestsize, updated_digestsize;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+
+	if (input == NULL)
+		goto out;
+	reqctx = ahash_request_ctx(req);
+	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+	if (reqctx->is_sg_map)
+		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+	if (reqctx->dma_addr)
+		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
+				 reqctx->dma_len, DMA_TO_DEVICE);
+	reqctx->dma_addr = 0;
+	updated_digestsize = digestsize;
+	if (digestsize == SHA224_DIGEST_SIZE)
+		updated_digestsize = SHA256_DIGEST_SIZE;
+	else if (digestsize == SHA384_DIGEST_SIZE)
+		updated_digestsize = SHA512_DIGEST_SIZE;
+	if (reqctx->result == 1) {
+		reqctx->result = 0;
+		memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+		       digestsize);
+	} else {
+		memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+		       updated_digestsize);
+	}
+out:
+	req->base.complete(&req->base, err);
+}
+
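+/*
+ * AEAD completion path: release the DMA mappings and, when the tag is
+ * verified in software, check it before completing the request.
+ */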
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+					 unsigned char *input,
+					 int err)
+{
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+	if (reqctx->b0_dma)
+		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+				 reqctx->b0_len, DMA_BIDIRECTIONAL);
+	if (reqctx->verify == VERIFY_SW) {
+		chcr_verify_tag(req, input, &err);
+		reqctx->verify = VERIFY_HW;
+	}
+	req->base.complete(&req->base, err);
+}
 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 {
 	u8 temp[SHA512_DIGEST_SIZE];
@@ -151,29 +240,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 {
 	struct crypto_tfm *tfm = req->tfm;
 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct chcr_req_ctx ctx_req;
-	unsigned int digestsize, updated_digestsize;
 	struct adapter *adap = padap(ctx->dev);
 
 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AEAD:
-		ctx_req.req.aead_req = aead_request_cast(req);
-		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
-			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
-		if (ctx_req.ctx.reqctx->skb) {
-			kfree_skb(ctx_req.ctx.reqctx->skb);
-			ctx_req.ctx.reqctx->skb = NULL;
-		}
-		free_new_sg(ctx_req.ctx.reqctx->newdstsg);
-		ctx_req.ctx.reqctx->newdstsg = NULL;
-		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
-			chcr_verify_tag(ctx_req.req.aead_req, input,
-					&err);
-			ctx_req.ctx.reqctx->verify = VERIFY_HW;
-		}
-		ctx_req.req.aead_req->base.complete(req, err);
+		chcr_handle_aead_resp(aead_request_cast(req), input, err);
 		break;
 
 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
@@ -182,60 +253,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 		break;
 
 	case CRYPTO_ALG_TYPE_AHASH:
-		ctx_req.req.ahash_req = ahash_request_cast(req);
-		ctx_req.ctx.ahash_ctx =
-			ahash_request_ctx(ctx_req.req.ahash_req);
-		digestsize =
-			crypto_ahash_digestsize(crypto_ahash_reqtfm(
-							ctx_req.req.ahash_req));
-		updated_digestsize = digestsize;
-		if (digestsize == SHA224_DIGEST_SIZE)
-			updated_digestsize = SHA256_DIGEST_SIZE;
-		else if (digestsize == SHA384_DIGEST_SIZE)
-			updated_digestsize = SHA512_DIGEST_SIZE;
-		if (ctx_req.ctx.ahash_ctx->skb) {
-			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
-			ctx_req.ctx.ahash_ctx->skb = NULL;
-		}
-		if (ctx_req.ctx.ahash_ctx->result == 1) {
-			ctx_req.ctx.ahash_ctx->result = 0;
-			memcpy(ctx_req.req.ahash_req->result, input +
-			       sizeof(struct cpl_fw6_pld),
-			       digestsize);
-		} else {
-			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
-			       sizeof(struct cpl_fw6_pld),
-			       updated_digestsize);
+		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
 		}
-		ctx_req.req.ahash_req->base.complete(req, err);
-		break;
-	}
 	atomic_inc(&adap->chcr_stats.complete);
 	return err;
 }
 
-/*
- *	calc_tx_flits_ofld - calculate # of flits for an offload packet
- *	@skb: the packet
- *	Returns the number of flits needed for the given offload packet.
- *	These packets are already fully constructed and no additional headers
- *	will be added.
- */
-static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
-{
-	unsigned int flits, cnt;
-
-	if (is_ofld_imm(skb))
-		return DIV_ROUND_UP(skb->len, 8);
-
-	flits = skb_transport_offset(skb) / 8;   /* headers */
-	cnt = skb_shinfo(skb)->nr_frags;
-	if (skb_tail_pointer(skb) != skb_transport_header(skb))
-		cnt++;
-	return flits + sgl_len(cnt);
-}
-
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
+static void get_aes_decrypt_key(unsigned char *dec_key,
 				       const unsigned char *key,
 				       unsigned int keylength)
 {
@@ -382,13 +406,19 @@ static inline int is_hmac(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
-			   struct scatterlist *sg,
-			   struct phys_sge_parm *sg_param)
+static inline void dsgl_walk_init(struct dsgl_walk *walk,
+				   struct cpl_rx_phys_dsgl *dsgl)
 {
-	struct phys_sge_pairs *to;
-	unsigned int len = 0, left_size = sg_param->obsize;
-	unsigned int nents = sg_param->nents, i, j = 0;
+	walk->dsgl = dsgl;
+	walk->nents = 0;
+	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
+}
+
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+{
+	struct cpl_rx_phys_dsgl *phys_cpl;
+
+	phys_cpl = walk->dsgl;
 
 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -398,38 +428,171 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
-		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
+		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
-	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
+	phys_cpl->rss_hdr_int.qid = htons(qid);
 	phys_cpl->rss_hdr_int.hash_val = 0;
-	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
-				       sizeof(struct cpl_rx_phys_dsgl));
-	for (i = 0; nents && left_size; to++) {
-		for (j = 0; j < 8 && nents && left_size; j++, nents--) {
-			len = min(left_size, sg_dma_len(sg));
-			to->len[j] = htons(len);
-			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
-			left_size -= len;
+}
+
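+/*
+ * Append one address/length pair to the destination DSGL; each
+ * phys_sge_pairs block holds eight entries, so advance to the next
+ * block once the current one is full.
+ */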
+static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
+					size_t size,
+					dma_addr_t *addr)
+{
+	int j;
+
+	if (!size)
+		return;
+	j = walk->nents;
+	walk->to->len[j % 8] = htons(size);
+	walk->to->addr[j % 8] = cpu_to_be64(*addr);
+	j++;
+	if ((j % 8) == 0)
+		walk->to++;
+	walk->nents = j;
+}
+
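+/*
+ * Add @slen bytes of @sg to the destination DSGL, skipping the first
+ * @skip bytes and splitting anything larger than CHCR_DST_SG_SIZE
+ * across multiple entries.
+ */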
+static void dsgl_walk_add_sg(struct dsgl_walk *walk,
+			     struct scatterlist *sg,
+			     unsigned int slen,
+			     unsigned int skip)
+{
+	int skip_len = 0;
+	unsigned int left_size = slen, len = 0;
+	unsigned int j = walk->nents;
+	int offset, ent_len;
+
+	if (!slen)
+		return;
+	while (sg && skip) {
+		if (sg_dma_len(sg) <= skip) {
+			skip -= sg_dma_len(sg);
+			skip_len = 0;
 			sg = sg_next(sg);
+		} else {
+			skip_len = skip;
+			skip = 0;
 		}
 	}
+
+	while (left_size && sg) {
+		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+		offset = 0;
+		while (len) {
+			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
+			walk->to->len[j % 8] = htons(ent_len);
+			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
+						      offset + skip_len);
+			offset += ent_len;
+			len -= ent_len;
+			j++;
+			if ((j % 8) == 0)
+				walk->to++;
+		}
+		walk->last_sg = sg;
+		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
+					  skip_len) + skip_len;
+		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+		skip_len = 0;
+		sg = sg_next(sg);
+	}
+	walk->nents = j;
+}
+
+static inline void ulptx_walk_init(struct ulptx_walk *walk,
+				   struct ulptx_sgl *ulp)
+{
+	walk->sgl = ulp;
+	walk->nents = 0;
+	walk->pair_idx = 0;
+	walk->pair = ulp->sge;
+	walk->last_sg = NULL;
+	walk->last_sg_len = 0;
 }
 
-static inline int map_writesg_phys_cpl(struct device *dev,
-					struct cpl_rx_phys_dsgl *phys_cpl,
+static inline void ulptx_walk_end(struct ulptx_walk *walk)
+{
+	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+			      ULPTX_NSGE_V(walk->nents));
+}
+
+static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
+					size_t size,
+					dma_addr_t *addr)
+{
+	if (!size)
+		return;
+
+	if (walk->nents == 0) {
+		walk->sgl->len0 = cpu_to_be32(size);
+		walk->sgl->addr0 = cpu_to_be64(*addr);
+	} else {
+		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
+		walk->pair_idx = !walk->pair_idx;
+		if (!walk->pair_idx)
+			walk->pair++;
+	}
+	walk->nents++;
+}
+
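+/*
+ * Add @len bytes of @sg to the ULPTX SGL after skipping @skip bytes:
+ * the first fragment fills len0/addr0, later ones go into
+ * address/length pairs, each capped at CHCR_SRC_SG_SIZE.
+ */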
+static void ulptx_walk_add_sg(struct ulptx_walk *walk,
 					struct scatterlist *sg,
-					struct phys_sge_parm *sg_param)
+			       unsigned int len,
+			       unsigned int skip)
 {
-	if (!sg || !sg_param->nents)
-		return -EINVAL;
+	int small;
+	int skip_len = 0;
+	unsigned int sgmin;
 
-	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
-	if (sg_param->nents == 0) {
-		pr_err("CHCR : DMA mapping failed\n");
-		return -EINVAL;
+	if (!len)
+		return;
+
+	while (sg && skip) {
+		if (sg_dma_len(sg) <= skip) {
+			skip -= sg_dma_len(sg);
+			skip_len = 0;
+			sg = sg_next(sg);
+		} else {
+			skip_len = skip;
+			skip = 0;
+		}
+	}
+	if (walk->nents == 0) {
+		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
+		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+		walk->sgl->len0 = cpu_to_be32(sgmin);
+		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
+		walk->nents++;
+		len -= sgmin;
+		walk->last_sg = sg;
+		walk->last_sg_len = sgmin + skip_len;
+		skip_len += sgmin;
+		if (sg_dma_len(sg) == skip_len) {
+			sg = sg_next(sg);
+			skip_len = 0;
+		}
+	}
+
+	while (sg && len) {
+		small = min(sg_dma_len(sg) - skip_len, len);
+		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
+		walk->pair->addr[walk->pair_idx] =
+			cpu_to_be64(sg_dma_address(sg) + skip_len);
+		walk->pair_idx = !walk->pair_idx;
+		walk->nents++;
+		if (!walk->pair_idx)
+			walk->pair++;
+		len -= sgmin;
+		skip_len += sgmin;
+		walk->last_sg = sg;
+		walk->last_sg_len = skip_len;
+		if (sg_dma_len(sg) == skip_len) {
+			sg = sg_next(sg);
+			skip_len = 0;
+		}
 	}
-	write_phys_cpl(phys_cpl, sg, sg_param);
-	return 0;
 }
 
 static inline int get_aead_subtype(struct crypto_aead *aead)
@@ -449,45 +612,6 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 }
 
-static inline void write_buffer_to_skb(struct sk_buff *skb,
-					unsigned int *frags,
-					char *bfr,
-					u8 bfr_len)
-{
-	skb->len += bfr_len;
-	skb->data_len += bfr_len;
-	skb->truesize += bfr_len;
-	get_page(virt_to_page(bfr));
-	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
-			   offset_in_page(bfr), bfr_len);
-	(*frags)++;
-}
-
-
-static inline void
-write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
-			struct scatterlist *sg, unsigned int count)
-{
-	struct page *spage;
-	unsigned int page_len;
-
-	skb->len += count;
-	skb->data_len += count;
-	skb->truesize += count;
-
-	while (count > 0) {
-		if (!sg || (!(sg->length)))
-			break;
-		spage = sg_page(sg);
-		get_page(spage);
-		page_len = min(sg->length, count);
-		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
-		(*frags)++;
-		count -= page_len;
-		sg = sg_next(sg);
-	}
-}
-
 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 {
 	struct adapter *adap = netdev2adap(dev);
@@ -524,30 +648,46 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
 			     struct scatterlist *dst,
 			     unsigned int minsg,
 			     unsigned int space,
-			     short int *sent,
-			     short int *dent)
+			     unsigned int srcskip,
+			     unsigned int dstskip)
 {
 	int srclen = 0, dstlen = 0;
-	int srcsg = minsg, dstsg = 0;
+	int srcsg = minsg, dstsg = minsg;
+	int offset = 0, less;
+
+	if (sg_dma_len(src) == srcskip) {
+		src = sg_next(src);
+		srcskip = 0;
+	}
 
-	*sent = 0;
-	*dent = 0;
-	while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
+	if (sg_dma_len(dst) == dstskip) {
+		dst = sg_next(dst);
+		dstskip = 0;
+	}
+
+	while (src && dst &&
 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
-		srclen += src->length;
+		srclen += (sg_dma_len(src) - srcskip);
 		srcsg++;
+		offset = 0;
 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
 			if (srclen <= dstlen)
 				break;
-			dstlen += dst->length;
-			dst = sg_next(dst);
+			less = min_t(unsigned int, sg_dma_len(dst) - offset -
+				dstskip, CHCR_DST_SG_SIZE);
+			dstlen += less;
+			offset += less;
+			if (offset == sg_dma_len(dst)) {
+				dst = sg_next(dst);
+				offset = 0;
+			}
 			dstsg++;
+			dstskip = 0;
 		}
 		src = sg_next(src);
+		srcskip = 0;
 	}
-	*sent = srcsg - minsg;
-	*dent = dstsg;
 	return min(srclen, dstlen);
 }
 
@@ -576,47 +716,35 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
 }
 static inline void create_wreq(struct chcr_context *ctx,
 			       struct chcr_wr *chcr_req,
-			       void *req, struct sk_buff *skb,
-			       int kctx_len, int hash_sz,
-			       int is_iv,
+			       struct crypto_async_request *req,
+			       unsigned int imm,
+			       int hash_sz,
+			       unsigned int len16,
 			       unsigned int sc_len,
 			       unsigned int lcb)
 {
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	int iv_loc = IV_DSGL;
 	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
-	unsigned int immdatalen = 0, nr_frags = 0;
 
-	if (is_ofld_imm(skb)) {
-		immdatalen = skb->data_len;
-		iv_loc = IV_IMMEDIATE;
-	} else {
-		nr_frags = skb_shinfo(skb)->nr_frags;
-	}
 
-	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
-				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
 	chcr_req->wreq.pld_size_hash_size =
-		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
-		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
+		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 	chcr_req->wreq.len16_pkd =
-		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
-				    (calc_tx_flits_ofld(skb) * 8), 16)));
+		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 	chcr_req->wreq.rx_chid_to_rx_q_id =
 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
-				is_iv ? iv_loc : IV_NOP, !!lcb,
-				ctx->tx_qidx);
+				!!lcb, ctx->tx_qidx);
 
 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
 						       qid);
-	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
-					16) - ((sizeof(chcr_req->wreq)) >> 4)));
+	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
+				     ((sizeof(chcr_req->wreq)) >> 4)));
 
-	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
-				   sizeof(chcr_req->key_ctx) +
-				   kctx_len + sc_len + immdatalen);
+					   sizeof(chcr_req->key_ctx) + sc_len);
 }
 
 /**
@@ -629,47 +757,52 @@ static inline void create_wreq(struct chcr_context *ctx,
 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct ulptx_sgl *ulptx;
 	struct chcr_blkcipher_req_ctx *reqctx =
 		ablkcipher_request_ctx(wrparam->req);
-	struct phys_sge_parm sg_param;
-	unsigned int frags = 0, transhdr_len, phys_dsgl;
+	unsigned int temp = 0, transhdr_len, dst_size;
 	int error;
-	unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
+	int nents;
+	unsigned int kctx_len;
 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 			GFP_KERNEL : GFP_ATOMIC;
-	struct adapter *adap = padap(ctx->dev);
-
-	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
+	struct adapter *adap = padap(c_ctx(tfm)->dev);
 
+	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
+			      reqctx->dst_ofst);
+	dst_size = get_space_for_phys_dsgl(nents + 1);
 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
-	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
+				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
+	temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
+			      * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
+	transhdr_len += temp;
+	transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 	if (!skb) {
 		error = -ENOMEM;
 		goto err;
 	}
-	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 	chcr_req->sec_cpl.op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
+		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
 
-	chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
+	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
-			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
 
 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 							 ablkctx->ciph_mode,
-							 0, 0, ivsize >> 1);
+							 0, 0, IV >> 1);
 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
-							  0, 1, phys_dsgl);
+							  0, 0, dst_size);
 
 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
@@ -694,26 +827,18 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 		}
 	}
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
-	sg_param.nents = reqctx->dst_nents;
-	sg_param.obsize =  wrparam->bytes;
-	sg_param.qid = wrparam->qid;
-	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
-				       reqctx->dst, &sg_param);
-	if (error)
-		goto map_fail1;
+	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
+	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
 
-	skb_set_transport_header(skb, transhdr_len);
-	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
-	write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
 	atomic_inc(&adap->chcr_stats.cipher_rqst);
-	create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
-			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
+	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
+		+ (reqctx->imm ? (IV + wrparam->bytes) : 0);
+	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
+		    transhdr_len, temp,
 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 	reqctx->skb = skb;
-	skb_get(skb);
 	return skb;
-map_fail1:
-	kfree_skb(skb);
 err:
 	return ERR_PTR(error);
 }
@@ -738,8 +863,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
 				       unsigned int keylen)
 {
 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 	int err = 0;
 
 	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -757,8 +881,7 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
 			       const u8 *key,
 			       unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 	unsigned int ck_size, context_size;
 	u16 alignment = 0;
 	int err;
@@ -790,8 +913,7 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
 				   const u8 *key,
 				   unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 	unsigned int ck_size, context_size;
 	u16 alignment = 0;
 	int err;
@@ -822,8 +944,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
 				   const u8 *key,
 				   unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 	unsigned int ck_size, context_size;
 	u16 alignment = 0;
 	int err;
@@ -890,25 +1011,28 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
 	struct crypto_cipher *cipher;
 	int ret, i;
 	u8 *key;
 	unsigned int keylen;
+	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
+	int round8 = round / 8;
 
 	cipher = ablkctx->aes_generic;
-	memcpy(iv, req->info, AES_BLOCK_SIZE);
+	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
 
 	keylen = ablkctx->enckey_len / 2;
 	key = ablkctx->key + keylen;
 	ret = crypto_cipher_setkey(cipher, key, keylen);
 	if (ret)
 		goto out;
+	/* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */
+	for (i = 0; i < round8; i++)
+		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
 
-	crypto_cipher_encrypt_one(cipher, iv, iv);
-	for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
+	for (i = 0; i < (round % 8); i++)
 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
 
 	crypto_cipher_decrypt_one(cipher, iv, iv);
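
chcr_update_tweak() now advances the XTS tweak arithmetically instead of re-deriving it from the processed byte count: each finished AES block multiplies the tweak by x in GF(2^128), and gf128mul_x8_ble() batches eight such doublings. A standalone sketch of one doubling in the little-endian XTS convention (hypothetical helper, not the lib/crypto implementation):

static void xts_mul_x_sketch(unsigned char t[16])
{
	/* Shift the 128-bit tweak left by one bit, byte 0 least
	 * significant, and fold the carry out of bit 127 back in
	 * with the reduction polynomial 0x87. */
	unsigned int carry = 0, i;

	for (i = 0; i < 16; i++) {
		unsigned int msb = t[i] >> 7;

		t[i] = (unsigned char)((t[i] << 1) | carry);
		carry = msb;
	}
	if (carry)
		t[0] ^= 0x87;
}
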
@@ -982,65 +1106,60 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 				   unsigned char *input, int err)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct sk_buff *skb;
 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
 	struct  cipher_wr_param wrparam;
 	int bytes;
 
-	dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
-		     DMA_FROM_DEVICE);
-
-	if (reqctx->skb) {
-		kfree_skb(reqctx->skb);
-		reqctx->skb = NULL;
-	}
 	if (err)
-		goto complete;
-
+		goto unmap;
 	if (req->nbytes == reqctx->processed) {
+		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+				      req);
 		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
 		goto complete;
 	}
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    ctx->tx_qidx))) {
+					    c_ctx(tfm)->tx_qidx))) {
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 			err = -EBUSY;
-			goto complete;
+			goto unmap;
 		}
 
 	}
-	wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
-				       reqctx->processed);
-	reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
-					 reqctx->processed);
-	if (!wrparam.srcsg || !reqctx->dst) {
-		pr_err("Input sg list length less that nbytes\n");
-		err = -EINVAL;
-		goto complete;
-	}
-	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
-				 SPACE_LEFT(ablkctx->enckey_len),
-				 &wrparam.snent, &reqctx->dst_nents);
+	if (!reqctx->imm) {
+		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
+					  SPACE_LEFT(ablkctx->enckey_len),
+					  reqctx->src_ofst, reqctx->dst_ofst);
 	if ((bytes + reqctx->processed) >= req->nbytes)
 		bytes  = req->nbytes - reqctx->processed;
 	else
 		bytes = ROUND_16(bytes);
+	} else {
+		/* CTR mode counter overflow */
+		bytes  = req->nbytes - reqctx->processed;
+	}
+	dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+				reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
+	dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+				   reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
 	if (err)
-		goto complete;
+		goto unmap;
 
 	if (unlikely(bytes == 0)) {
+		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+				      req);
 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
 				     req->base.flags,
-				     wrparam.srcsg,
-				     reqctx->dst,
-				     req->nbytes - reqctx->processed,
-				     reqctx->iv,
+				     req->src,
+				     req->dst,
+				     req->nbytes,
+				     req->info,
 				     reqctx->op);
 		goto complete;
 	}
@@ -1048,23 +1167,24 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR)
 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
-	reqctx->processed += bytes;
-	wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
 	wrparam.req = req;
 	wrparam.bytes = bytes;
 	skb = create_cipher_wr(&wrparam);
 	if (IS_ERR(skb)) {
 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
 		err = PTR_ERR(skb);
-		goto complete;
+		goto unmap;
 	}
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
 	chcr_send_wr(skb);
+	reqctx->last_req_len = bytes;
+	reqctx->processed += bytes;
 	return 0;
+unmap:
+	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
 complete:
-	free_new_sg(reqctx->newdstsg);
-	reqctx->newdstsg = NULL;
 	req->base.complete(&req->base, err);
 	return err;
 }
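
The completion handler resubmits whatever chcr_sg_ent_in_wr() says still fits, keeping every intermediate chunk block-aligned via ROUND_16(). A sketch of that chunking rule, assuming ROUND_16() masks down to a 16-byte multiple as chcr_algo.h appears to define it:

static unsigned int next_chunk_sketch(unsigned int nbytes,
				      unsigned int processed,
				      unsigned int space)
{
	/* Final chunk takes the exact tail; intermediate chunks are
	 * truncated to whole 16-byte AES blocks (assumed ROUND_16()). */
	if (processed + space >= nbytes)
		return nbytes - processed;
	return space & ~0xFU;
}
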
@@ -1077,12 +1197,10 @@ static int process_cipher(struct ablkcipher_request *req,
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct	cipher_wr_param wrparam;
-	int bytes, nents, err = -EINVAL;
+	int bytes, err = -EINVAL;
 
-	reqctx->newdstsg = NULL;
 	reqctx->processed = 0;
 	if (!req->info)
 		goto error;
@@ -1093,25 +1211,41 @@ static int process_cipher(struct ablkcipher_request *req,
 		       ablkctx->enckey_len, req->nbytes, ivsize);
 		goto error;
 	}
-	wrparam.srcsg = req->src;
-	if (is_newsg(req->dst, &nents)) {
-		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
-		if (IS_ERR(reqctx->newdstsg))
-			return PTR_ERR(reqctx->newdstsg);
-		reqctx->dstsg = reqctx->newdstsg;
+	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+					    AES_MIN_KEY_SIZE +
+					    sizeof(struct cpl_rx_phys_dsgl) +
+					/* Min dsgl size */
+					    32))) {
+		/* Can be sent as Imm */
+		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
+
+		dnents = sg_nents_xlen(req->dst, req->nbytes,
+				       CHCR_DST_SG_SIZE, 0);
+		dnents += 1; // IV
+		phys_dsgl = get_space_for_phys_dsgl(dnents);
+		kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
+		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+			SGE_MAX_WR_LEN;
+		bytes = IV + req->nbytes;
+
 	} else {
-		reqctx->dstsg = req->dst;
+		reqctx->imm = 0;
 	}
-	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
-				 SPACE_LEFT(ablkctx->enckey_len),
-				 &wrparam.snent,
-				 &reqctx->dst_nents);
+
+	if (!reqctx->imm) {
+		bytes = chcr_sg_ent_in_wr(req->src, req->dst,
+					  MIN_CIPHER_SG,
+					  SPACE_LEFT(ablkctx->enckey_len),
+					  0, 0);
 	if ((bytes + reqctx->processed) >= req->nbytes)
 		bytes  = req->nbytes - reqctx->processed;
 	else
 		bytes = ROUND_16(bytes);
-	if (unlikely(bytes > req->nbytes))
+	} else {
 		bytes = req->nbytes;
+	}
 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
 				  CRYPTO_ALG_SUB_TYPE_CTR) {
 		bytes = adjust_ctr_overflow(req->info, bytes);
@@ -1128,9 +1262,11 @@ static int process_cipher(struct ablkcipher_request *req,
 
 	} else {
 
-		memcpy(reqctx->iv, req->info, ivsize);
+		memcpy(reqctx->iv, req->info, IV);
 	}
 	if (unlikely(bytes == 0)) {
+		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+				      req);
 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
 					   req->base.flags,
 					   req->src,
@@ -1140,45 +1276,48 @@ static int process_cipher(struct ablkcipher_request *req,
 					   op_type);
 		goto error;
 	}
-	reqctx->processed = bytes;
-	reqctx->dst = reqctx->dstsg;
 	reqctx->op = op_type;
+	reqctx->srcsg = req->src;
+	reqctx->dstsg = req->dst;
+	reqctx->src_ofst = 0;
+	reqctx->dst_ofst = 0;
 	wrparam.qid = qid;
 	wrparam.req = req;
 	wrparam.bytes = bytes;
 	*skb = create_cipher_wr(&wrparam);
 	if (IS_ERR(*skb)) {
 		err = PTR_ERR(*skb);
-		goto error;
+		goto unmap;
 	}
+	reqctx->processed = bytes;
+	reqctx->last_req_len = bytes;
 
 	return 0;
+unmap:
+	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
 error:
-	free_new_sg(reqctx->newdstsg);
-	reqctx->newdstsg = NULL;
 	return err;
 }
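
sg_nents_xlen(), added elsewhere in this patch, counts hardware SG entries with each entry capped at CHCR_SRC/DST_SG_SIZE bytes, which is why the DSGL estimate above divides lengths rather than counting scatterlist nodes. For a single flat buffer the count reduces to a ceiling division — a hypothetical helper, not the driver's:

static unsigned int nents_for_len_sketch(unsigned int len,
					 unsigned int max_ent)
{
	/* Entries needed when one hardware SG entry addresses at most
	 * max_ent bytes; real scatterlists additionally split at
	 * element boundaries, which sg_nents_xlen() handles. */
	return (len + max_ent - 1) / max_ent;	/* DIV_ROUND_UP */
}
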
 
 static int chcr_aes_encrypt(struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct sk_buff *skb = NULL;
 	int err;
-	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    ctx->tx_qidx))) {
+					    c_ctx(tfm)->tx_qidx))) {
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 			return -EBUSY;
 	}
 
-	err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
-			       CHCR_ENCRYPT_OP);
+	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+			     &skb, CHCR_ENCRYPT_OP);
 	if (err || !skb)
 		return  err;
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
 }
@@ -1186,23 +1325,22 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
 static int chcr_aes_decrypt(struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 	struct sk_buff *skb = NULL;
 	int err;
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    ctx->tx_qidx))) {
+					    c_ctx(tfm)->tx_qidx))) {
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 			return -EBUSY;
 	}
 
-	 err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
-			       CHCR_DECRYPT_OP);
+	 err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+			      &skb, CHCR_DECRYPT_OP);
 	if (err || !skb)
 		return err;
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
 }
@@ -1350,17 +1488,19 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
 	struct sk_buff *skb = NULL;
+	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
 	struct chcr_wr *chcr_req;
-	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
+	struct ulptx_sgl *ulptx;
+	unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
-	unsigned int kctx_len = 0;
+	unsigned int kctx_len = 0, temp = 0;
 	u8 hash_size_in_response = 0;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
-	struct adapter *adap = padap(ctx->dev);
+	struct adapter *adap = padap(h_ctx(tfm)->dev);
+	int error = 0;
 
 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
 	kctx_len = param->alg_prm.result_size + iopad_alignment;
@@ -1372,15 +1512,22 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	else
 		hash_size_in_response = param->alg_prm.result_size;
 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+	req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
+		SGE_MAX_WR_LEN;
+	nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
+	nents += param->bfr_len ? 1 : 0;
+	transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
+			param->sg_len), 16) * 16) :
+			(sgl_len(nents) * 8);
+	transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 	if (!skb)
-		return skb;
-
-	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+		return ERR_PTR(-ENOMEM);
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 
 	chcr_req->sec_cpl.op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
+		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
 
 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1409,37 +1556,52 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 					    ((kctx_len +
 					     sizeof(chcr_req->key_ctx)) >> 4));
 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
-
-	skb_set_transport_header(skb, transhdr_len);
-	if (param->bfr_len != 0)
-		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
-				    param->bfr_len);
-	if (param->sg_len != 0)
-		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
+	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+				     DUMMY_BYTES);
+	if (param->bfr_len != 0) {
+		req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
+					  req_ctx->reqbfr, param->bfr_len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
+				       req_ctx->dma_addr)) {
+			error = -ENOMEM;
+			goto err;
+		}
+		req_ctx->dma_len = param->bfr_len;
+	} else {
+		req_ctx->dma_addr = 0;
+	}
+	chcr_add_hash_src_ent(req, ulptx, param);
+	/* Request up to max WR size */
+	temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
+					+ param->bfr_len) : 0);
 	atomic_inc(&adap->chcr_stats.digest_rqst);
-	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
-		    hash_size_in_response, 0, DUMMY_BYTES, 0);
+	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
+		    hash_size_in_response, transhdr_len,
+		    temp,  0);
 	req_ctx->skb = skb;
-	skb_get(skb);
 	return skb;
+err:
+	kfree_skb(skb);
+	return  ERR_PTR(error);
 }
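
The new transhdr_len arithmetic sizes the variable tail of the WR before allocating it: immediate payloads are padded to a 16-byte multiple, SGL mode charges sgl_len(nents) flits of 8 bytes each, and the total is rounded up to 16 bytes. A re-derivation of the cxgb4 sgl_len() flit rule this assumes (sketch only — the first address/length pair occupies two flits, each later pair of entries three, an odd trailing entry two):

static unsigned int sgl_len_sketch(unsigned int n)
{
	/* ULPTX SGL size in 8-byte flits for n entries; multiply by
	 * 8 for bytes, as the transhdr_len computation above does. */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
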
 
 static int chcr_ahash_update(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
-	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
 	struct uld_ctx *u_ctx = NULL;
 	struct sk_buff *skb;
 	u8 remainder = 0, bs;
 	unsigned int nbytes = req->nbytes;
 	struct hash_wr_param params;
+	int error;
 
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
 
-	u_ctx = ULD_CTX(ctx);
+	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    ctx->tx_qidx))) {
+					    h_ctx(rtfm)->tx_qidx))) {
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 			return -EBUSY;
 	}
@@ -1453,7 +1615,9 @@ static int chcr_ahash_update(struct ahash_request *req)
 		req_ctx->reqlen += nbytes;
 		return 0;
 	}
-
+	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+	if (error)
+		return -ENOMEM;
 	params.opad_needed = 0;
 	params.more = 1;
 	params.last = 0;
@@ -1464,25 +1628,27 @@ static int chcr_ahash_update(struct ahash_request *req)
 	req_ctx->result = 0;
 	req_ctx->data_len += params.sg_len + params.bfr_len;
 	skb = create_hash_wr(req, &params);
-	if (!skb)
-		return -ENOMEM;
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto unmap;
+	}
 
 	if (remainder) {
-		u8 *temp;
 		/* Swap buffers */
-		temp = req_ctx->reqbfr;
-		req_ctx->reqbfr = req_ctx->skbfr;
-		req_ctx->skbfr = temp;
+		swap(req_ctx->reqbfr, req_ctx->skbfr);
 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
 				   req_ctx->reqbfr, remainder, req->nbytes -
 				   remainder);
 	}
 	req_ctx->reqlen = remainder;
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
 
 	return -EINPROGRESS;
+unmap:
+	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+	return error;
 }
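
Only whole hash blocks go to hardware; the tail is carried in reqbfr for the next call, and swap() from kernel.h replaces the open-coded three-variable exchange so the buffer referenced by the in-flight WR stays stable. The split, as a sketch with bs the algorithm block size:

static unsigned int hw_bytes_sketch(unsigned int buffered,
				    unsigned int nbytes, unsigned int bs)
{
	/* Block-aligned portion goes into this WR; the remainder is
	 * parked in reqbfr and prepended to the next update. */
	unsigned int total = buffered + nbytes;

	return total - (total % bs);
}
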
 
 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
@@ -1499,13 +1665,12 @@ static int chcr_ahash_final(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
-	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
 	struct hash_wr_param params;
 	struct sk_buff *skb;
 	struct uld_ctx *u_ctx = NULL;
 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
 
-	u_ctx = ULD_CTX(ctx);
+	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (is_hmac(crypto_ahash_tfm(rtfm)))
 		params.opad_needed = 1;
 	else
@@ -1528,11 +1693,11 @@ static int chcr_ahash_final(struct ahash_request *req)
 		params.more = 0;
 	}
 	skb = create_hash_wr(req, &params);
-	if (!skb)
-		return -ENOMEM;
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
 
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
 }
@@ -1541,17 +1706,17 @@ static int chcr_ahash_finup(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
-	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
 	struct uld_ctx *u_ctx = NULL;
 	struct sk_buff *skb;
 	struct hash_wr_param params;
 	u8  bs;
+	int error;
 
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-	u_ctx = ULD_CTX(ctx);
+	u_ctx = ULD_CTX(h_ctx(rtfm));
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    ctx->tx_qidx))) {
+					    h_ctx(rtfm)->tx_qidx))) {
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 			return -EBUSY;
 	}
@@ -1577,34 +1742,41 @@ static int chcr_ahash_finup(struct ahash_request *req)
 		params.last = 1;
 		params.more = 0;
 	}
-
-	skb = create_hash_wr(req, &params);
-	if (!skb)
+	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+	if (error)
 		return -ENOMEM;
 
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto unmap;
+	}
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
 
 	return -EINPROGRESS;
+unmap:
+	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+	return error;
 }
 
 static int chcr_ahash_digest(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
-	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
 	struct uld_ctx *u_ctx = NULL;
 	struct sk_buff *skb;
 	struct hash_wr_param params;
 	u8  bs;
+	int error;
 
 	rtfm->init(req);
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
 
-	u_ctx = ULD_CTX(ctx);
+	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    ctx->tx_qidx))) {
+					    h_ctx(rtfm)->tx_qidx))) {
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 			return -EBUSY;
 	}
@@ -1613,6 +1785,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
 		params.opad_needed = 1;
 	else
 		params.opad_needed = 0;
+	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+	if (error)
+		return -ENOMEM;
 
 	params.last = 0;
 	params.more = 0;
@@ -1630,13 +1805,17 @@ static int chcr_ahash_digest(struct ahash_request *req)
 	}
 
 	skb = create_hash_wr(req, &params);
-	if (!skb)
-		return -ENOMEM;
-
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto unmap;
+	}
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
+unmap:
+	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+	return error;
 }
 
 static int chcr_ahash_export(struct ahash_request *areq, void *out)
@@ -1646,6 +1825,8 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
 
 	state->reqlen = req_ctx->reqlen;
 	state->data_len = req_ctx->data_len;
+	state->is_sg_map = 0;
+	state->result = 0;
 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
 	memcpy(state->partial_hash, req_ctx->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1661,6 +1842,8 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
 	req_ctx->data_len = state->data_len;
 	req_ctx->reqbfr = req_ctx->bfr1;
 	req_ctx->skbfr = req_ctx->bfr2;
+	req_ctx->is_sg_map = 0;
+	req_ctx->result = 0;
 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
 	memcpy(req_ctx->partial_hash, state->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1670,8 +1853,7 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			     unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 	unsigned int i, err = 0, updated_digestsize;
@@ -1724,8 +1906,7 @@ out:
 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 			       unsigned int key_len)
 {
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
-	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 	unsigned short context_size = 0;
 	int err;
 
@@ -1764,6 +1945,7 @@ static int chcr_sha_init(struct ahash_request *areq)
 	req_ctx->skbfr = req_ctx->bfr2;
 	req_ctx->skb = NULL;
 	req_ctx->result = 0;
+	req_ctx->is_sg_map = 0;
 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
 	return 0;
 }
@@ -1779,8 +1961,7 @@ static int chcr_hmac_init(struct ahash_request *areq)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
-	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
-	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
 
@@ -1826,86 +2007,48 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
 	}
 }
 
-static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
-{
-	int nents = 0;
-	int ret = 0;
-
-	while (sgl) {
-		if (sgl->length > CHCR_SG_SIZE)
-			ret = 1;
-		nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
-		sgl = sg_next(sgl);
-	}
-	*newents = nents;
-	return ret;
-}
-
-static inline void free_new_sg(struct scatterlist *sgl)
+static int chcr_aead_common_init(struct aead_request *req,
+				 unsigned short op_type)
 {
-	kfree(sgl);
-}
-
-static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
-				       unsigned int nents)
-{
-	struct scatterlist *newsg, *sg;
-	int i, len, processed = 0;
-	struct page *spage;
-	int offset;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	int error = -EINVAL;
+	unsigned int dst_size;
+	unsigned int authsize = crypto_aead_authsize(tfm);
 
-	newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
-	if (!newsg)
-		return ERR_PTR(-ENOMEM);
-	sg = newsg;
-	sg_init_table(sg, nents);
-	offset = sgl->offset;
-	spage = sg_page(sgl);
-	for (i = 0; i < nents; i++) {
-		len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
-		sg_set_page(sg, spage, len, offset);
-		processed += len;
-		offset += len;
-		if (offset >= PAGE_SIZE) {
-			offset = offset % PAGE_SIZE;
-			spage++;
-		}
-		if (processed == sgl->length) {
-			processed = 0;
-			sgl = sg_next(sgl);
-			if (!sgl)
-				break;
-			spage = sg_page(sgl);
-			offset = sgl->offset;
-		}
-		sg = sg_next(sg);
+	dst_size = req->assoclen + req->cryptlen + (op_type ?
+					-authsize : authsize);
+	/* validate key size */
+	if (aeadctx->enckey_len == 0)
+		goto err;
+	if (op_type && req->cryptlen < authsize)
+		goto err;
+	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+				  op_type);
+	if (error) {
+		error = -ENOMEM;
+		goto err;
 	}
-	return newsg;
+	reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
+					  CHCR_SRC_SG_SIZE, 0);
+	reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
+					  CHCR_SRC_SG_SIZE, req->assoclen);
+	return 0;
+err:
+	return error;
 }
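
The ± authsize idiom repeated across the AEAD paths encodes the tag handling: encryption (op_type == 0 here) emits cryptlen plus the tag, decryption strips it. Restated as a hypothetical helper for orientation:

static int aead_dst_size_sketch(unsigned int assoclen,
				unsigned int cryptlen,
				unsigned int authsize,
				unsigned short op_type)
{
	/* op_type != 0 means decrypt in this driver. */
	return assoclen + cryptlen +
	       (op_type ? -(int)authsize : (int)authsize);
}
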
 
-static int chcr_copy_assoc(struct aead_request *req,
-				struct chcr_aead_ctx *ctx)
-{
-	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
-	skcipher_request_set_tfm(skreq, ctx->null);
-	skcipher_request_set_callback(skreq, aead_request_flags(req),
-			NULL, NULL);
-	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
-			NULL);
-
-	return crypto_skcipher_encrypt(skreq);
-}
-static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
+static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
 				   int aadmax, int wrlen,
 				   unsigned short op_type)
 {
 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
 
 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
+	    dst_nents > MAX_DSGL_ENT ||
 	    (req->assoclen > aadmax) ||
-	    (src_nent > MAX_SKB_FRAGS) ||
-	    (wrlen > MAX_WR_SIZE))
+	    (wrlen > SGE_MAX_WR_LEN))
 		return 1;
 	return 0;
 }
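
Every WR builder now feeds this the destination entry count instead of the old source skb fragment count; when any hardware limit would be exceeded, the request is undone and handed to the software AEAD. The call pattern, mirroring the sites in this patch:

	/* Typical call site (as in create_authenc_wr() and friends):
	 * unmap what chcr_aead_common_init() mapped, then punt to the
	 * sw_cipher fallback transform. */
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, op_type)) {
		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
				    req, op_type);
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
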
@@ -1913,8 +2056,7 @@ static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_context *ctx = crypto_aead_ctx(tfm);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	struct aead_request *subreq = aead_request_ctx(req);
 
 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
@@ -1933,96 +2075,75 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 					 unsigned short op_type)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_context *ctx = crypto_aead_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
-	struct phys_sge_parm sg_param;
-	struct scatterlist *src;
-	unsigned int frags = 0, transhdr_len;
-	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
-	unsigned int   kctx_len = 0, nents;
-	unsigned short stop_offset = 0;
+	struct ulptx_sgl *ulptx;
+	unsigned int transhdr_len;
+	unsigned int dst_size = 0, temp;
+	unsigned int   kctx_len = 0, dnents;
 	unsigned int  assoclen = req->assoclen;
 	unsigned int  authsize = crypto_aead_authsize(tfm);
-	int error = -EINVAL, src_nent;
+	int error = -EINVAL;
 	int null = 0;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
-	struct adapter *adap = padap(ctx->dev);
+	struct adapter *adap = padap(a_ctx(tfm)->dev);
 
-	reqctx->newdstsg = NULL;
-	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
-						   authsize);
-	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
-		goto err;
-
-	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
-		goto err;
-	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
-	if (src_nent < 0)
-		goto err;
-	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	if (req->cryptlen == 0)
+		return NULL;
 
-	if (req->src != req->dst) {
-		error = chcr_copy_assoc(req, aeadctx);
-		if (error)
-			return ERR_PTR(error);
-	}
-	if (dst_size && is_newsg(req->dst, &nents)) {
-		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
-		if (IS_ERR(reqctx->newdstsg))
-			return ERR_CAST(reqctx->newdstsg);
-		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
-					       reqctx->newdstsg, req->assoclen);
-	} else {
-		if (req->src == req->dst)
-			reqctx->dst = src;
-		else
-			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
-						       req->dst, req->assoclen);
-	}
+	reqctx->b0_dma = 0;
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
 		null = 1;
 		assoclen = 0;
 	}
-	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
-					     (op_type ? -authsize : authsize));
-	if (reqctx->dst_nents < 0) {
-		pr_err("AUTHENC:Invalid Destination sg entries\n");
-		error = -EINVAL;
-		goto err;
+	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
+						    authsize);
+	error = chcr_aead_common_init(req, op_type);
+	if (error)
+		return ERR_PTR(error);
+	if (dst_size) {
+		dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+		dnents += sg_nents_xlen(req->dst, req->cryptlen +
+			(op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+			req->assoclen);
+		dnents += MIN_AUTH_SG; // For IV
+	} else {
+		dnents = 0;
 	}
-	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+
+	dst_size = get_space_for_phys_dsgl(dnents);
 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
 		- sizeof(chcr_req->key_ctx);
 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
-	if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
-			T6_MAX_AAD_SIZE,
-			transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
-				op_type)) {
+	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+			SGE_MAX_WR_LEN;
+	temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
+			* 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+			+ MIN_GCM_SG) * 8);
+	transhdr_len += temp;
+	transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+				    transhdr_len, op_type)) {
 		atomic_inc(&adap->chcr_stats.fallback);
-		free_new_sg(reqctx->newdstsg);
-		reqctx->newdstsg = NULL;
+		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+				    op_type);
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 	if (!skb) {
 		error = -ENOMEM;
 		goto err;
 	}
 
-	/* LLD is going to write the sge hdr. */
-	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
-	/* Write WR */
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 
-	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	temp  = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
 
 	/*
 	 * Input order	is AAD,IV and Payload. where IV should be included as
@@ -2030,24 +2151,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	 * to the hardware spec
 	 */
 	chcr_req->sec_cpl.op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
-				       (ivsize ? (assoclen + 1) : 0));
-	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
+				       assoclen + 1);
+	chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
 					assoclen ? 1 : 0, assoclen,
-					assoclen + ivsize + 1,
-					(stop_offset & 0x1F0) >> 4);
+					assoclen + IV + 1,
+					(temp & 0x1F0) >> 4);
 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
-					stop_offset & 0xF,
-					null ? 0 : assoclen + ivsize + 1,
-					stop_offset, stop_offset);
+					temp & 0xF,
+					null ? 0 : assoclen + IV + 1,
+					temp, temp);
 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
 					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
 					CHCR_SCMD_CIPHER_MODE_AES_CBC,
 					actx->auth_mode, aeadctx->hmac_ctrl,
-					ivsize >> 1);
+					IV >> 1);
 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
-					 0, 1, dst_size);
+					 0, 0, dst_size);
 
 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
 	if (op_type == CHCR_ENCRYPT_OP)
@@ -2060,41 +2181,312 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
 					4), actx->h_iopad, kctx_len -
 				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
-
+	memcpy(reqctx->iv, req->iv, IV);
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
-	sg_param.nents = reqctx->dst_nents;
-	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
-	sg_param.qid = qid;
-	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
-					reqctx->dst, &sg_param);
-	if (error)
-		goto dstmap_fail;
-
-	skb_set_transport_header(skb, transhdr_len);
-
-	if (assoclen) {
-		/* AAD buffer in */
-		write_sg_to_skb(skb, &frags, req->src, assoclen);
-
-	}
-	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
-	write_sg_to_skb(skb, &frags, src, req->cryptlen);
+	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
 	atomic_inc(&adap->chcr_stats.cipher_rqst);
-	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
-		   sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
+	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+		   transhdr_len, temp, 0);
 	reqctx->skb = skb;
-	skb_get(skb);
+	reqctx->op = op_type;
 
 	return skb;
-dstmap_fail:
-	/* ivmap_fail: */
-	kfree_skb(skb);
 err:
-	free_new_sg(reqctx->newdstsg);
-	reqctx->newdstsg = NULL;
+	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+			    op_type);
+
 	return ERR_PTR(error);
 }
 
+static int chcr_aead_dma_map(struct device *dev,
+			     struct aead_request *req,
+			     unsigned short op_type)
+{
+	int error;
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int dst_size;
+
+	dst_size = req->assoclen + req->cryptlen + (op_type ?
+				-authsize : authsize);
+	if (!req->cryptlen || !dst_size)
+		return 0;
+	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+					DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, reqctx->iv_dma))
+		return -ENOMEM;
+
+	if (req->src == req->dst) {
+		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+				   DMA_BIDIRECTIONAL);
+		if (!error)
+			goto err;
+	} else {
+		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+		if (!error)
+			goto err;
+		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+				   DMA_FROM_DEVICE);
+		if (!error) {
+			dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+	return -ENOMEM;
+}
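
A note on the `if (!error)` tests above: dma_map_sg() returns the number of entries it mapped and 0 on failure — never a negative errno — so zero is the failure case being translated into -ENOMEM. The same pattern in isolation:

static int map_src_sketch(struct device *dev, struct scatterlist *sgl)
{
	/* > 0 mapped entries on success, 0 on failure. */
	int n = dma_map_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE);

	return n ? 0 : -ENOMEM;
}
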
+
+static void chcr_aead_dma_unmap(struct device *dev,
+			     struct aead_request *req,
+			     unsigned short op_type)
+{
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int dst_size;
+
+	dst_size = req->assoclen + req->cryptlen + (op_type ?
+					-authsize : authsize);
+	if (!req->cryptlen || !dst_size)
+		return;
+
+	dma_unmap_single(dev, reqctx->iv_dma, IV,
+					DMA_BIDIRECTIONAL);
+	if (req->src == req->dst) {
+		dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+				   DMA_FROM_DEVICE);
+	}
+}
+
+static inline void chcr_add_aead_src_ent(struct aead_request *req,
+			       struct ulptx_sgl *ulptx,
+			       unsigned int assoclen,
+			       unsigned short op_type)
+{
+	struct ulptx_walk ulp_walk;
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+
+	if (reqctx->imm) {
+		u8 *buf = (u8 *)ulptx;
+
+		if (reqctx->b0_dma) {
+			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
+			buf += reqctx->b0_len;
+		}
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+				   buf, assoclen, 0);
+		buf += assoclen;
+		memcpy(buf, reqctx->iv, IV);
+		buf += IV;
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+				   buf, req->cryptlen, req->assoclen);
+	} else {
+		ulptx_walk_init(&ulp_walk, ulptx);
+		if (reqctx->b0_dma)
+			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
+					    &reqctx->b0_dma);
+		ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
+		ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
+				  req->assoclen);
+		ulptx_walk_end(&ulp_walk);
+	}
+}
+
+static inline void chcr_add_aead_dst_ent(struct aead_request *req,
+			       struct cpl_rx_phys_dsgl *phys_cpl,
+			       unsigned int assoclen,
+			       unsigned short op_type,
+			       unsigned short qid)
+{
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct dsgl_walk dsgl_walk;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	u32 temp;
+
+	dsgl_walk_init(&dsgl_walk, phys_cpl);
+	if (reqctx->b0_dma)
+		dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
+	dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
+	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+	temp = req->cryptlen + (op_type ? -authsize : authsize);
+	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+	dsgl_walk_end(&dsgl_walk, qid);
+}
+
+static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+					   struct ulptx_sgl *ulptx,
+					   struct  cipher_wr_param *wrparam)
+{
+	struct ulptx_walk ulp_walk;
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+	if (reqctx->imm) {
+		u8 *buf = (u8 *)ulptx;
+
+		memcpy(buf, reqctx->iv, IV);
+		buf += IV;
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+				   buf, wrparam->bytes, reqctx->processed);
+	} else {
+		ulptx_walk_init(&ulp_walk, ulptx);
+		ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
+				  reqctx->src_ofst);
+		reqctx->srcsg = ulp_walk.last_sg;
+		reqctx->src_ofst = ulp_walk.last_sg_len;
+		ulptx_walk_end(&ulp_walk);
+	}
+}
+
+static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+					   struct cpl_rx_phys_dsgl *phys_cpl,
+					   struct  cipher_wr_param *wrparam,
+					   unsigned short qid)
+{
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct dsgl_walk dsgl_walk;
+
+	dsgl_walk_init(&dsgl_walk, phys_cpl);
+	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
+			 reqctx->dst_ofst);
+	reqctx->dstsg = dsgl_walk.last_sg;
+	reqctx->dst_ofst = dsgl_walk.last_sg_len;
+
+	dsgl_walk_end(&dsgl_walk, qid);
+}
+
+static inline void chcr_add_hash_src_ent(struct ahash_request *req,
+					   struct ulptx_sgl *ulptx,
+					   struct hash_wr_param *param)
+{
+	struct ulptx_walk ulp_walk;
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+
+	if (reqctx->imm) {
+		u8 *buf = (u8 *)ulptx;
+
+		if (param->bfr_len) {
+			memcpy(buf, reqctx->reqbfr, param->bfr_len);
+			buf += param->bfr_len;
+		}
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+				   buf, param->sg_len, 0);
+	} else {
+		ulptx_walk_init(&ulp_walk, ulptx);
+		if (param->bfr_len)
+			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
+					    &reqctx->dma_addr);
+		ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
+					  0);
+		ulptx_walk_end(&ulp_walk);
+	}
+}
+
+
+static inline int chcr_hash_dma_map(struct device *dev,
+			     struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+	int error = 0;
+
+	if (!req->nbytes)
+		return 0;
+	error = dma_map_sg(dev, req->src, sg_nents(req->src),
+			   DMA_TO_DEVICE);
+	if (!error)
+		return -ENOMEM;
+	req_ctx->is_sg_map = 1;
+	return 0;
+}
+
+static inline void chcr_hash_dma_unmap(struct device *dev,
+			     struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+	if (!req->nbytes)
+		return;
+
+	dma_unmap_sg(dev, req->src, sg_nents(req->src),
+			   DMA_TO_DEVICE);
+	req_ctx->is_sg_map = 0;
+
+}
+
+
+static int chcr_cipher_dma_map(struct device *dev,
+			     struct ablkcipher_request *req)
+{
+	int error;
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+					DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, reqctx->iv_dma))
+		return -ENOMEM;
+
+	if (req->src == req->dst) {
+		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+				   DMA_BIDIRECTIONAL);
+		if (!error)
+			goto err;
+	} else {
+		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+		if (!error)
+			goto err;
+		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+				   DMA_FROM_DEVICE);
+		if (!error) {
+			dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+	return -ENOMEM;
+}
+static void chcr_cipher_dma_unmap(struct device *dev,
+				  struct ablkcipher_request *req)
+{
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+	dma_unmap_single(dev, reqctx->iv_dma, IV,
+					DMA_BIDIRECTIONAL);
+	if (req->src == req->dst) {
+		dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+				   DMA_FROM_DEVICE);
+	}
+}
+
 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
 {
 	__be32 data;
@@ -2179,15 +2571,13 @@ static int ccm_format_packet(struct aead_request *req,
 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 				  unsigned int dst_size,
 				  struct aead_request *req,
-				  unsigned short op_type,
-					  struct chcr_context *chcrctx)
+				  unsigned short op_type)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
-	unsigned int ivsize = AES_BLOCK_SIZE;
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
-	unsigned int c_id = chcrctx->dev->rx_channel_id;
+	unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
 	unsigned int ccm_xtra;
 	unsigned char tag_offset = 0, auth_offset = 0;
 	unsigned int assoclen;
@@ -2200,7 +2590,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
 
 	auth_offset = req->cryptlen ?
-		(assoclen + ivsize + 1 + ccm_xtra) : 0;
+		(assoclen + IV + 1 + ccm_xtra) : 0;
 	if (op_type == CHCR_DECRYPT_OP) {
 		if (crypto_aead_authsize(tfm) != req->cryptlen)
 			tag_offset = crypto_aead_authsize(tfm);
@@ -2210,14 +2600,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 
 
 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
-					 2, (ivsize ?  (assoclen + 1) :  0) +
-					 ccm_xtra);
+					 2, assoclen + 1 + ccm_xtra);
 	sec_cpl->pldlen =
-		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+		htonl(assoclen + IV + req->cryptlen + ccm_xtra);
 	/* For CCM there will be b0 always. So AAD start will be 1 always */
 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
 					1, assoclen + ccm_xtra, assoclen
-					+ ivsize + 1 + ccm_xtra, 0);
+					+ IV + 1 + ccm_xtra, 0);
 
 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
 					auth_offset, tag_offset,
@@ -2226,10 +2615,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
 					cipher_mode, mac_mode,
-					aeadctx->hmac_ctrl, ivsize >> 1);
+					aeadctx->hmac_ctrl, IV >> 1);
 
 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
-					1, dst_size);
+					0, dst_size);
 }
 
 int aead_ccm_validate_input(unsigned short op_type,
@@ -2249,131 +2638,83 @@ int aead_ccm_validate_input(unsigned short op_type,
 			return -EINVAL;
 		}
 	}
-	if (aeadctx->enckey_len == 0) {
-		pr_err("CCM: Encryption key not set\n");
-		return -EINVAL;
-	}
 	return 0;
 }
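
For orientation, the B0 block that ccm_format_packet() assembles into scratch_pad begins with the RFC 3610 flags octet; a hypothetical encoding of just that byte (the driver builds the full block elsewhere, so this is a sketch, not its code):

static unsigned char ccm_b0_flags_sketch(int have_aad,
					 unsigned int tag_len,
					 unsigned int l)
{
	/* RFC 3610: bit 6 set when AAD is present, bits 3..5 carry
	 * (M - 2) / 2 for an M-byte tag, bits 0..2 carry L - 1 for
	 * an L-byte message-length field. */
	return (have_aad ? 0x40 : 0) | (((tag_len - 2) / 2) << 3) | (l - 1);
}
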
 
-unsigned int fill_aead_req_fields(struct sk_buff *skb,
-				  struct aead_request *req,
-				  struct scatterlist *src,
-				  unsigned int ivsize,
-				  struct chcr_aead_ctx *aeadctx)
-{
-	unsigned int frags = 0;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
-	/* b0 and aad length(if available) */
-
-	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
-				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
-	if (req->assoclen) {
-		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
-			write_sg_to_skb(skb, &frags, req->src,
-					req->assoclen - 8);
-		else
-			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
-	}
-	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
-	if (req->cryptlen)
-		write_sg_to_skb(skb, &frags, src, req->cryptlen);
-
-	return frags;
-}
-
 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 					  unsigned short qid,
 					  int size,
 					  unsigned short op_type)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_context *ctx = crypto_aead_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
-	struct phys_sge_parm sg_param;
-	struct scatterlist *src;
-	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
-	unsigned int dst_size = 0, kctx_len, nents;
-	unsigned int sub_type;
+	struct ulptx_sgl *ulptx;
+	unsigned int transhdr_len;
+	unsigned int dst_size = 0, kctx_len, dnents, temp;
+	unsigned int sub_type, assoclen = req->assoclen;
 	unsigned int authsize = crypto_aead_authsize(tfm);
-	int error = -EINVAL, src_nent;
+	int error = -EINVAL;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
-	struct adapter *adap = padap(ctx->dev);
+	struct adapter *adap = padap(a_ctx(tfm)->dev);
 
-	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
+	reqctx->b0_dma = 0;
+	sub_type = get_aead_subtype(tfm);
+	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+		assoclen -= 8;
+	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
 						   authsize);
-	reqctx->newdstsg = NULL;
-	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
-		goto err;
-	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
-	if (src_nent < 0)
-		goto err;
+	error = chcr_aead_common_init(req, op_type);
+	if (error)
+		return ERR_PTR(error);
 
-	sub_type = get_aead_subtype(tfm);
-	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
-	if (req->src != req->dst) {
-		error = chcr_copy_assoc(req, aeadctx);
-		if (error) {
-			pr_err("AAD copy to destination buffer fails\n");
-			return ERR_PTR(error);
-		}
-	}
-	if (dst_size && is_newsg(req->dst, &nents)) {
-		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
-		if (IS_ERR(reqctx->newdstsg))
-			return ERR_CAST(reqctx->newdstsg);
-		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
-					       reqctx->newdstsg, req->assoclen);
-	} else {
-		if (req->src == req->dst)
-			reqctx->dst = src;
-		else
-			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
-						       req->dst, req->assoclen);
-	}
-	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
-					     (op_type ? -authsize : authsize));
-	if (reqctx->dst_nents < 0) {
-		pr_err("CCM:Invalid Destination sg entries\n");
-		error = -EINVAL;
-		goto err;
-	}
+
+	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
 	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
 	if (error)
 		goto err;
-
-	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+	if (dst_size) {
+		dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+		dnents += sg_nents_xlen(req->dst, req->cryptlen
+				+ (op_type ? -authsize : authsize),
+				CHCR_DST_SG_SIZE, req->assoclen);
+		dnents += MIN_CCM_SG; // For IV and B0
+	} else {
+		dnents = 0;
+	}
+	dst_size = get_space_for_phys_dsgl(dnents);
 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
-	if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
-			    T6_MAX_AAD_SIZE - 18,
-			    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
-			    op_type)) {
+	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
+	temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
+				reqctx->b0_len), 16) * 16) :
+		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
+				    MIN_CCM_SG) *  8);
+	transhdr_len += temp;
+	transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
+				    reqctx->b0_len, transhdr_len, op_type)) {
 		atomic_inc(&adap->chcr_stats.fallback);
-		free_new_sg(reqctx->newdstsg);
-		reqctx->newdstsg = NULL;
+		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+				    op_type);
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
-
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
+	skb = alloc_skb(SGE_MAX_WR_LEN,  flags);
 
 	if (!skb) {
 		error = -ENOMEM;
 		goto err;
 	}
 
-	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
-	chcr_req = __skb_put_zero(skb, transhdr_len);
+	chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
 
-	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
 
 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
@@ -2381,31 +2722,37 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 					16), aeadctx->key, aeadctx->enckey_len);
 
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
 	if (error)
 		goto dstmap_fail;
 
-	sg_param.nents = reqctx->dst_nents;
-	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
-	sg_param.qid = qid;
-	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
-				 reqctx->dst, &sg_param);
-	if (error)
+	reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
+					&reqctx->scratch_pad, reqctx->b0_len,
+					DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
+			      reqctx->b0_dma)) {
+		error = -ENOMEM;
 		goto dstmap_fail;
+	}
+
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
 
-	skb_set_transport_header(skb, transhdr_len);
-	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
 	atomic_inc(&adap->chcr_stats.aead_rqst);
-	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
-		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
+	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+		reqctx->b0_len) : 0);
+	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
+		    transhdr_len, temp, 0);
 	reqctx->skb = skb;
-	skb_get(skb);
+	reqctx->op = op_type;
+
 	return skb;
 dstmap_fail:
 	kfree_skb(skb);
 err:
-	free_new_sg(reqctx->newdstsg);
-	reqctx->newdstsg = NULL;
+	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
 	return ERR_PTR(error);
 }
 
@@ -2415,115 +2762,84 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 				     unsigned short op_type)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_context *ctx = crypto_aead_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
-	struct phys_sge_parm sg_param;
-	struct scatterlist *src;
-	unsigned int frags = 0, transhdr_len;
-	unsigned int ivsize = AES_BLOCK_SIZE;
-	unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
-	unsigned char tag_offset = 0;
+	struct ulptx_sgl *ulptx;
+	unsigned int transhdr_len, dnents = 0;
+	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
 	unsigned int authsize = crypto_aead_authsize(tfm);
-	int error = -EINVAL, src_nent;
+	int error = -EINVAL;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
-	struct adapter *adap = padap(ctx->dev);
-
-	reqctx->newdstsg = NULL;
-	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
-						    authsize);
-	/* validate key size */
-	if (aeadctx->enckey_len == 0)
-		goto err;
+	struct adapter *adap = padap(a_ctx(tfm)->dev);
 
-	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
-		goto err;
-	src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
-	if (src_nent < 0)
-		goto err;
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+		assoclen = req->assoclen - 8;
 
-	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
-	if (req->src != req->dst) {
-		error = chcr_copy_assoc(req, aeadctx);
+	reqctx->b0_dma = 0;
+	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :  authsize);
+	error = chcr_aead_common_init(req, op_type);
 	if (error)
 		return ERR_PTR(error);
-	}
-
-	if (dst_size && is_newsg(req->dst, &nents)) {
-		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
-		if (IS_ERR(reqctx->newdstsg))
-			return ERR_CAST(reqctx->newdstsg);
-		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
-					       reqctx->newdstsg, assoclen);
+	if (dst_size) {
+		dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+		dnents += sg_nents_xlen(req->dst,
+			req->cryptlen + (op_type ? -authsize : authsize),
+				CHCR_DST_SG_SIZE, req->assoclen);
+		dnents += MIN_GCM_SG; // For IV
 	} else {
-		if (req->src == req->dst)
-			reqctx->dst = src;
-		else
-			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
-						       req->dst, assoclen);
-	}
-
-	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
-					     (op_type ? -authsize : authsize));
-	if (reqctx->dst_nents < 0) {
-		pr_err("GCM:Invalid Destination sg entries\n");
-		error = -EINVAL;
-		goto err;
+		dnents = 0;
 	}
-
-
-	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+	dst_size = get_space_for_phys_dsgl(dnents);
 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
 		AEAD_H_SIZE;
 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
-	if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
-			    T6_MAX_AAD_SIZE,
-			    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
-			    op_type)) {
+	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+			SGE_MAX_WR_LEN;
+	temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
+	req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
+				reqctx->aad_nents + MIN_GCM_SG) * 8);
+	transhdr_len += temp;
+	transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+			    transhdr_len, op_type)) {
 		atomic_inc(&adap->chcr_stats.fallback);
-		free_new_sg(reqctx->newdstsg);
-		reqctx->newdstsg = NULL;
+		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+				    op_type);
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 	if (!skb) {
 		error = -ENOMEM;
 		goto err;
 	}
 
-	/* NIC driver is going to write the sge hdr. */
-	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 
-	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
-		assoclen = req->assoclen - 8;
-
-	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	/* Offset of tag from end */
+	temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
-					ctx->dev->rx_channel_id, 2, (ivsize ?
-					(assoclen + 1) : 0));
+					a_ctx(tfm)->dev->rx_channel_id, 2,
+					(assoclen + 1));
 	chcr_req->sec_cpl.pldlen =
-		htonl(assoclen + ivsize + req->cryptlen);
+		htonl(assoclen + IV + req->cryptlen);
 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
 					assoclen ? 1 : 0, assoclen,
-					assoclen + ivsize + 1, 0);
+					assoclen + IV + 1, 0);
 		chcr_req->sec_cpl.cipherstop_lo_authinsert =
-			FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
-						tag_offset, tag_offset);
+			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+						temp, temp);
 		chcr_req->sec_cpl.seqno_numivs =
 			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
 					CHCR_ENCRYPT_OP) ? 1 : 0,
 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
 					CHCR_SCMD_AUTH_MODE_GHASH,
-					aeadctx->hmac_ctrl, ivsize >> 1);
+					aeadctx->hmac_ctrl, IV >> 1);
 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
-					0, 1, dst_size);
+					0, 0, dst_size);
 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
@@ -2534,39 +2850,28 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	if (get_aead_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
 		memcpy(reqctx->iv, aeadctx->salt, 4);
-		memcpy(reqctx->iv + 4, req->iv, 8);
+		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
 	} else {
-		memcpy(reqctx->iv, req->iv, 12);
+		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
 	}
 	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
 
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
-	sg_param.nents = reqctx->dst_nents;
-	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
-	sg_param.qid = qid;
-	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
-					  reqctx->dst, &sg_param);
-	if (error)
-		goto dstmap_fail;
+	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 
-	skb_set_transport_header(skb, transhdr_len);
-	write_sg_to_skb(skb, &frags, req->src, assoclen);
-	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
-	write_sg_to_skb(skb, &frags, src, req->cryptlen);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
 	atomic_inc(&adap->chcr_stats.aead_rqst);
-	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
-			sizeof(struct cpl_rx_phys_dsgl) + dst_size,
-			reqctx->verify);
+	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+		    transhdr_len, temp, reqctx->verify);
 	reqctx->skb = skb;
-	skb_get(skb);
+	reqctx->op = op_type;
 	return skb;
 
-dstmap_fail:
-	/* ivmap_fail: */
-	kfree_skb(skb);
 err:
-	free_new_sg(reqctx->newdstsg);
-	reqctx->newdstsg = NULL;
+	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
 	return ERR_PTR(error);
 }
 
@@ -2574,8 +2879,7 @@ err:
 
 static int chcr_aead_cra_init(struct crypto_aead *tfm)
 {
-	struct chcr_context *ctx = crypto_aead_ctx(tfm);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	struct aead_alg *alg = crypto_aead_alg(tfm);
 
 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
@@ -2586,25 +2890,20 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
 				 sizeof(struct aead_request) +
 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
-	aeadctx->null = crypto_get_default_null_skcipher();
-	if (IS_ERR(aeadctx->null))
-		return PTR_ERR(aeadctx->null);
-	return chcr_device_init(ctx);
+	return chcr_device_init(a_ctx(tfm));
 }
 
 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
 {
-	struct chcr_context *ctx = crypto_aead_ctx(tfm);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 
-	crypto_put_default_null_skcipher();
 	crypto_free_aead(aeadctx->sw_cipher);
 }
 
 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
 					unsigned int authsize)
 {
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 
 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
 	aeadctx->mayverify = VERIFY_HW;
@@ -2613,7 +2912,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
 				    unsigned int authsize)
 {
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	u32 maxauth = crypto_aead_maxauthsize(tfm);
 
 	/*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
@@ -2651,7 +2950,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
 
 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 {
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 
 	switch (authsize) {
 	case ICV_4:
@@ -2691,7 +2990,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
 					  unsigned int authsize)
 {
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 
 	switch (authsize) {
 	case ICV_8:
@@ -2717,7 +3016,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
 				unsigned int authsize)
 {
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 
 	switch (authsize) {
 	case ICV_4:
@@ -2760,8 +3059,7 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
 				const u8 *key,
 				unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_aead_ctx(aead);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
 	unsigned char ck_size, mk_size;
 	int key_ctx_size = 0;
 
@@ -2794,8 +3092,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
 				const u8 *key,
 				unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_aead_ctx(aead);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
 	int error;
 
 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -2813,8 +3110,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
 				    unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_aead_ctx(aead);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
 	int error;
 
 	if (keylen < 3) {
@@ -2840,8 +3136,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 			   unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_aead_ctx(aead);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
 	struct crypto_cipher *cipher;
 	unsigned int ck_size;
@@ -2913,8 +3208,7 @@ out:
 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
 				   unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_aead_ctx(authenc);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
 	/* it contains auth and cipher key both*/
 	struct crypto_authenc_keys keys;
@@ -3034,8 +3328,7 @@ out:
 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
 					const u8 *key, unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_aead_ctx(authenc);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
 	struct crypto_authenc_keys keys;
 	int err;
@@ -3107,7 +3400,7 @@ static int chcr_aead_encrypt(struct aead_request *req)
 static int chcr_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	int size;
 
@@ -3140,30 +3433,29 @@ static int chcr_aead_op(struct aead_request *req,
 			  create_wr_t create_wr_fn)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct chcr_context *ctx = crypto_aead_ctx(tfm);
 	struct uld_ctx *u_ctx;
 	struct sk_buff *skb;
 
-	if (!ctx->dev) {
+	if (!a_ctx(tfm)->dev) {
 		pr_err("chcr : %s : No crypto device.\n", __func__);
 		return -ENXIO;
 	}
-	u_ctx = ULD_CTX(ctx);
+	u_ctx = ULD_CTX(a_ctx(tfm));
 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-				   ctx->tx_qidx)) {
+				   a_ctx(tfm)->tx_qidx)) {
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 			return -EBUSY;
 	}
 
 	/* Form a WR from req */
-	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
+	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
 			   op_type);
 
 	if (IS_ERR(skb) || !skb)
 		return PTR_ERR(skb);
 
 	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
 }
@@ -3385,7 +3677,7 @@ static struct chcr_alg_template driver_algs[] = {
 						sizeof(struct chcr_aead_ctx) +
 						sizeof(struct chcr_gcm_ctx),
 			},
-			.ivsize = 12,
+			.ivsize = GCM_AES_IV_SIZE,
 			.maxauthsize = GHASH_DIGEST_SIZE,
 			.setkey = chcr_gcm_setkey,
 			.setauthsize = chcr_gcm_setauthsize,
@@ -3405,7 +3697,7 @@ static struct chcr_alg_template driver_algs[] = {
 						sizeof(struct chcr_gcm_ctx),
 
 			},
-			.ivsize = 8,
+			.ivsize = GCM_RFC4106_IV_SIZE,
 			.maxauthsize	= GHASH_DIGEST_SIZE,
 			.setkey = chcr_gcm_setkey,
 			.setauthsize	= chcr_4106_4309_setauthsize,

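The rewritten create_gcm_wr() above replaces the per-request scatterlist surgery with a single up-front DMA mapping (chcr_aead_common_init()) plus a decision between carrying the payload as immediate data in the work request or as a ULPTX scatter-gather list. A minimal sketch of that decision, reusing the cxgb4 names visible in the hunk (SGE_MAX_WR_LEN, sgl_len()); the helper functions themselves are hypothetical, not driver code:

#include <linux/kernel.h>

/* Inline the payload in the work request only when the transport
 * header, AAD, IV and data all fit into a single WR. */
static bool chcr_fits_immediate(unsigned int transhdr_len,
				unsigned int assoclen,
				unsigned int ivlen,
				unsigned int cryptlen)
{
	return transhdr_len + assoclen + ivlen + cryptlen <= SGE_MAX_WR_LEN;
}

/* WR bytes the payload costs in each mode, as computed in the hunk:
 * immediate data is padded to a 16-byte multiple, while SGL mode
 * spends 8 bytes per flit reported by sgl_len() for the source
 * entries. */
static unsigned int chcr_payload_wr_len(bool imm, unsigned int datalen,
					unsigned int nents)
{
	return imm ? DIV_ROUND_UP(datalen, 16) * 16 : sgl_len(nents) * 8;
}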
+ 15 - 42
drivers/crypto/chelsio/chcr_algo.h

@@ -176,21 +176,21 @@
 		      KEY_CONTEXT_SALT_PRESENT_V(1) | \
 		      KEY_CONTEXT_CTX_LEN_V((ctx_len)))
 
-#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \
+#define FILL_WR_OP_CCTX_SIZE \
 		htonl( \
 			FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
 			FW_CRYPTO_LOOKASIDE_WR) | \
 			FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
-			FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \
-			FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
-			FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
+			FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \
+			FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \
+			FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0))
 
-#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \
+#define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \
 		htonl( \
 			FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
 			FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
 			FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
-			FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \
+			FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \
 			FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))
 
 #define FILL_ULPTX_CMD_DEST(cid, qid) \
@@ -214,27 +214,22 @@
 					   calc_tx_flits_ofld(skb) * 8), 16)))
 
 #define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
-					ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1))
-
+					ULP_TX_SC_MORE_V((immdatalen)))
 #define MAX_NK 8
-#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
-#define MAX_WR_SIZE			512
 #define ROUND_16(bytes)		((bytes) & 0xFFFFFFF0)
 #define MAX_DSGL_ENT			32
-#define MAX_DIGEST_SKB_SGE	(MAX_SKB_FRAGS - 2)
 #define MIN_CIPHER_SG			1 /* IV */
-#define MIN_AUTH_SG			2 /*IV + AAD*/
-#define MIN_GCM_SG			2 /* IV + AAD*/
+#define MIN_AUTH_SG			1 /* IV */
+#define MIN_GCM_SG			1 /* IV */
 #define MIN_DIGEST_SG			1 /*Partial Buffer*/
-#define MIN_CCM_SG			3 /*IV+AAD+B0*/
+#define MIN_CCM_SG			2 /*IV+B0*/
 #define SPACE_LEFT(len) \
-	((MAX_WR_SIZE - WR_MIN_LEN - (len)))
+	((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
 
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40,
-				48, 64, 72, 88,
-				96, 112, 120, 136,
-				144, 160, 168, 184,
-				192};
+unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
+				96, 112, 120, 136, 144, 160, 168, 184,
+				192, 208, 216, 232, 240, 256, 264, 280,
+				288, 304, 312, 328, 336, 352, 360, 376};
 unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
 				112, 112, 128, 128, 144, 144, 160, 160,
 				192, 192, 208, 208, 224, 224, 240, 240,
@@ -258,10 +253,8 @@ struct hash_wr_param {
 
 struct cipher_wr_param {
 	struct ablkcipher_request *req;
-	struct scatterlist *srcsg;
 	char *iv;
 	int bytes;
-	short int snent;
 	unsigned short qid;
 };
 enum {
@@ -299,31 +292,11 @@ enum {
 	ICV_16 = 16
 };
 
-struct hash_op_params {
-	unsigned char mk_size;
-	unsigned char pad_align;
-	unsigned char auth_mode;
-	char hash_name[MAX_HASH_NAME];
-	unsigned short block_size;
-	unsigned short word_size;
-	unsigned short ipad_size;
-};
-
 struct phys_sge_pairs {
 	__be16 len[8];
 	__be64 addr[8];
 };
 
-struct phys_sge_parm {
-	unsigned int nents;
-	unsigned int obsize;
-	unsigned short qid;
-};
-
-struct crypto_result {
-	struct completion completion;
-	int err;
-};
 
 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
 		SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,

+ 5 - 5
drivers/crypto/chelsio/chcr_core.c

@@ -153,16 +153,16 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
 {
 	struct uld_ctx *u_ctx;
 
+	/* Bail out early when the adapter lacks crypto lookaside support */
+	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	/* Create the device and add it in the device list */
 	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
 	if (!u_ctx) {
 		u_ctx = ERR_PTR(-ENOMEM);
 		goto out;
 	}
-	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
-		u_ctx = ERR_PTR(-ENOMEM);
-		goto out;
-	}
 	u_ctx->lldi = *lld;
 out:
 	return u_ctx;
@@ -224,7 +224,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 static int __init chcr_crypto_init(void)
 {
 	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
-		pr_err("ULD register fail: No chcr crypto support in cxgb4");
+		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
 
 	return 0;
 }

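Beyond the pr_err() newline fix, the chcr_uld_add() hunk moves the capability test ahead of the allocation, which avoids a pointless kzalloc() on adapters without crypto support and reports the precise -EOPNOTSUPP instead of the old, misleading -ENOMEM. Distilled into a sketch (driver types assumed from the hunk, not the verbatim code):

#include <linux/err.h>
#include <linux/slab.h>

static void *demo_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	/* Cheap capability check first: nothing allocated to undo. */
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx)
		return ERR_PTR(-ENOMEM);

	u_ctx->lldi = *lld;
	return u_ctx;
}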
+ 1 - 1
drivers/crypto/chelsio/chcr_core.h

@@ -89,7 +89,7 @@ struct uld_ctx {
 	struct chcr_dev *dev;
 };
 
-struct uld_ctx * assign_chcr_device(void);
+struct uld_ctx *assign_chcr_device(void);
 int chcr_send_wr(struct sk_buff *skb);
 int start_crypto(void);
 int stop_crypto(void);

+ 86 - 35
drivers/crypto/chelsio/chcr_crypto.h

@@ -149,9 +149,23 @@
 
 #define CHCR_HASH_MAX_BLOCK_SIZE_64  64
 #define CHCR_HASH_MAX_BLOCK_SIZE_128 128
-#define CHCR_SG_SIZE 2048
+#define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int))
+#define CHCR_DST_SG_SIZE 2048
 
-/* Aligned to 128 bit boundary */
+static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
+{
+	return crypto_aead_ctx(tfm);
+}
+
+static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+{
+	return crypto_ablkcipher_ctx(tfm);
+}
+
+static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
 
 struct ablk_ctx {
 	struct crypto_skcipher *sw_cipher;
@@ -165,16 +179,39 @@ struct ablk_ctx {
 };
 struct chcr_aead_reqctx {
 	struct	sk_buff	*skb;
-	struct scatterlist *dst;
-	struct scatterlist *newdstsg;
-	struct scatterlist srcffwd[2];
-	struct scatterlist dstffwd[2];
+	dma_addr_t iv_dma;
+	dma_addr_t b0_dma;
+	unsigned int b0_len;
+	unsigned int op;
+	short int aad_nents;
+	short int src_nents;
 	short int dst_nents;
+	u16 imm;
 	u16 verify;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
 	unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
 };
 
+struct ulptx_walk {
+	struct ulptx_sgl *sgl;
+	unsigned int nents;
+	unsigned int pair_idx;
+	unsigned int last_sg_len;
+	struct scatterlist *last_sg;
+	struct ulptx_sge_pair *pair;
+};
+
+struct dsgl_walk {
+	unsigned int nents;
+	unsigned int last_sg_len;
+	struct scatterlist *last_sg;
+	struct cpl_rx_phys_dsgl *dsgl;
+	struct phys_sge_pairs *to;
+};
+
 struct chcr_gcm_ctx {
 	u8 ghash_h[AEAD_H_SIZE];
 };
@@ -195,7 +232,6 @@ struct __aead_ctx {
 struct chcr_aead_ctx {
 	__be32 key_ctx_hdr;
 	unsigned int enckey_len;
-	struct crypto_skcipher *null;
 	struct crypto_aead *sw_cipher;
 	u8 salt[MAX_SALT];
 	u8 key[CHCR_AES_MAX_KEY_LEN];
@@ -231,8 +267,11 @@ struct chcr_ahash_req_ctx {
 	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
 	u8 *reqbfr;
 	u8 *skbfr;
+	dma_addr_t dma_addr;
+	u32 dma_len;
 	u8 reqlen;
-	/* DMA the partial hash in it */
+	u8 imm;
+	u8 is_sg_map;
 	u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
 	u64 data_len;  /* Data len till time */
 	/* SKB which is being sent to the hardware for processing */
@@ -241,14 +280,15 @@ struct chcr_ahash_req_ctx {
 
 struct chcr_blkcipher_req_ctx {
 	struct sk_buff *skb;
-	struct scatterlist srcffwd[2];
-	struct scatterlist dstffwd[2];
 	struct scatterlist *dstsg;
-	struct scatterlist *dst;
-	struct scatterlist *newdstsg;
 	unsigned int processed;
+	unsigned int last_req_len;
+	struct scatterlist *srcsg;
+	unsigned int src_ofst;
+	unsigned int dst_ofst;
 	unsigned int op;
-	short int dst_nents;
+	dma_addr_t iv_dma;
+	u16 imm;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
 };
 
@@ -262,24 +302,6 @@ struct chcr_alg_template {
 	} alg;
 };
 
-struct chcr_req_ctx {
-	union {
-		struct ahash_request *ahash_req;
-		struct aead_request *aead_req;
-		struct ablkcipher_request *ablk_req;
-	} req;
-	union {
-		struct chcr_ahash_req_ctx *ahash_ctx;
-		struct chcr_aead_reqctx *reqctx;
-		struct chcr_blkcipher_req_ctx *ablk_ctx;
-	} ctx;
-};
-
-struct sge_opaque_hdr {
-	void *dev;
-	dma_addr_t addr[MAX_SKB_FRAGS + 1];
-};
-
 typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
 				       unsigned short qid,
 				       int size,
@@ -290,10 +312,39 @@ static int chcr_aead_op(struct aead_request *req_base,
 			  int size,
 			  create_wr_t create_wr_fn);
 static inline int get_aead_subtype(struct crypto_aead *aead);
-static int is_newsg(struct scatterlist *sgl, unsigned int *newents);
-static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
-					unsigned int nents);
-static inline void free_new_sg(struct scatterlist *sgl);
 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 				   unsigned char *input, int err);
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+			     unsigned short op_type);
+static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
+				*req, unsigned short op_type);
+static inline void chcr_add_aead_dst_ent(struct aead_request *req,
+				    struct cpl_rx_phys_dsgl *phys_cpl,
+				    unsigned int assoclen,
+				    unsigned short op_type,
+				    unsigned short qid);
+static inline void chcr_add_aead_src_ent(struct aead_request *req,
+				    struct ulptx_sgl *ulptx,
+				    unsigned int assoclen,
+				    unsigned short op_type);
+static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+					   struct ulptx_sgl *ulptx,
+					   struct  cipher_wr_param *wrparam);
+static int chcr_cipher_dma_map(struct device *dev,
+			       struct ablkcipher_request *req);
+static void chcr_cipher_dma_unmap(struct device *dev,
+				  struct ablkcipher_request *req);
+static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+					   struct cpl_rx_phys_dsgl *phys_cpl,
+					   struct  cipher_wr_param *wrparam,
+					   unsigned short qid);
+int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
+static inline void chcr_add_hash_src_ent(struct ahash_request *req,
+					 struct ulptx_sgl *ulptx,
+					 struct hash_wr_param *param);
+static inline int chcr_hash_dma_map(struct device *dev,
+				    struct ahash_request *req);
+static inline void chcr_hash_dma_unmap(struct device *dev,
+				       struct ahash_request *req);
 #endif /* __CHCR_CRYPTO_H__ */

+ 2 - 4
drivers/crypto/inside-secure/safexcel_hash.c

@@ -308,10 +308,8 @@ unmap_cache:
 		ctx->base.cache_sz = 0;
 	}
 free_cache:
-	if (ctx->base.cache) {
-		kfree(ctx->base.cache);
-		ctx->base.cache = NULL;
-	}
+	kfree(ctx->base.cache);
+	ctx->base.cache = NULL;
 
 unlock:
 	spin_unlock_bh(&priv->ring[ring].egress_lock);

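The safexcel simplification works because kfree(NULL) is defined to be a no-op, so the surrounding conditional was pure noise; nulling the pointer afterwards keeps the cleanup path idempotent. A hypothetical helper for illustration:

#include <linux/slab.h>

static void demo_free_cache(void **cache)
{
	kfree(*cache);		/* safe even if *cache was never allocated */
	*cache = NULL;		/* a second pass through here is harmless */
}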
+ 0 - 1
drivers/crypto/ixp4xx_crypto.c

@@ -534,7 +534,6 @@ static void release_ixp_crypto(struct device *dev)
 			NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
 			crypt_virt, crypt_phys);
 	}
-	return;
 }
 
 static void reset_sa_dir(struct ix_sa_dir *dir)

+ 13 - 16
drivers/crypto/marvell/cesa.c

@@ -34,10 +34,6 @@
 /* Limit of the crypto queue before reaching the backlog */
 #define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
 
-static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
-module_param_named(allhwsupport, allhwsupport, int, 0444);
-MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)");
-
 struct mv_cesa_dev *cesa_dev;
 
 struct crypto_async_request *
@@ -76,8 +72,6 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
 
 	ctx = crypto_tfm_ctx(req->tfm);
 	ctx->ops->step(req);
-
-	return;
 }
 
 static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
@@ -183,8 +177,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
 	spin_lock_bh(&engine->lock);
 	ret = crypto_enqueue_request(&engine->queue, req);
 	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
-	    (ret == -EINPROGRESS ||
-	    (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	    (ret == -EINPROGRESS || ret == -EBUSY))
 		mv_cesa_tdma_chain(engine, creq);
 	spin_unlock_bh(&engine->lock);
 
@@ -202,7 +195,7 @@ static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
 	int i, j;
 
 	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
-		ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
+		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
 		if (ret)
 			goto err_unregister_crypto;
 	}
@@ -222,7 +215,7 @@ err_unregister_ahash:
 
 err_unregister_crypto:
 	for (j = 0; j < i; j++)
-		crypto_unregister_alg(cesa->caps->cipher_algs[j]);
+		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
 
 	return ret;
 }
@@ -235,10 +228,10 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
 		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
 
 	for (i = 0; i < cesa->caps->ncipher_algs; i++)
-		crypto_unregister_alg(cesa->caps->cipher_algs[i]);
+		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
 }
 
-static struct crypto_alg *orion_cipher_algs[] = {
+static struct skcipher_alg *orion_cipher_algs[] = {
 	&mv_cesa_ecb_des_alg,
 	&mv_cesa_cbc_des_alg,
 	&mv_cesa_ecb_des3_ede_alg,
@@ -254,7 +247,7 @@ static struct ahash_alg *orion_ahash_algs[] = {
 	&mv_ahmac_sha1_alg,
 };
 
-static struct crypto_alg *armada_370_cipher_algs[] = {
+static struct skcipher_alg *armada_370_cipher_algs[] = {
 	&mv_cesa_ecb_des_alg,
 	&mv_cesa_cbc_des_alg,
 	&mv_cesa_ecb_des3_ede_alg,
@@ -459,9 +452,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
 		caps = match->data;
 	}
 
-	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
-		return -ENOTSUPP;
-
 	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
 	if (!cesa)
 		return -ENOMEM;
@@ -599,9 +589,16 @@ static int mv_cesa_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct platform_device_id mv_cesa_plat_id_table[] = {
+	{ .name = "mv_crypto" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
+
 static struct platform_driver marvell_cesa = {
 	.probe		= mv_cesa_probe,
 	.remove		= mv_cesa_remove,
+	.id_table	= mv_cesa_plat_id_table,
 	.driver		= {
 		.name	= "marvell-cesa",
 		.of_match_table = mv_cesa_of_match_table,

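The mv_cesa_queue_req() hunk above can drop the CRYPTO_TFM_REQ_MAY_BACKLOG re-test because, under the queueing semantics this series introduces, crypto_enqueue_request() returns -EBUSY only for a request actually taken into the backlog; a full queue without the backlog flag is now reported as -ENOSPC. A sketch of the resulting caller-side logic (the predicate is hypothetical):

#include <linux/errno.h>

/* -EINPROGRESS: queued normally and will be processed.
 * -EBUSY:       accepted into the backlog (no flag check needed).
 * -ENOSPC:      queue full and MAY_BACKLOG unset - request rejected. */
static bool demo_request_accepted(int enqueue_ret)
{
	return enqueue_ret == -EINPROGRESS || enqueue_ret == -EBUSY;
}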
+ 14 - 13
drivers/crypto/marvell/cesa.h

@@ -5,6 +5,7 @@
 #include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
 
 #include <linux/crypto.h>
 #include <linux/dmapool.h>
@@ -373,7 +374,7 @@ struct mv_cesa_engine;
 struct mv_cesa_caps {
 	int nengines;
 	bool has_tdma;
-	struct crypto_alg **cipher_algs;
+	struct skcipher_alg **cipher_algs;
 	int ncipher_algs;
 	struct ahash_alg **ahash_algs;
 	int nahash_algs;
@@ -539,12 +540,12 @@ struct mv_cesa_sg_std_iter {
 };
 
 /**
- * struct mv_cesa_ablkcipher_std_req - cipher standard request
+ * struct mv_cesa_skcipher_std_req - cipher standard request
  * @op:		operation context
  * @offset:	current operation offset
  * @size:	size of the crypto operation
  */
-struct mv_cesa_ablkcipher_std_req {
+struct mv_cesa_skcipher_std_req {
 	struct mv_cesa_op_ctx op;
 	unsigned int offset;
 	unsigned int size;
@@ -552,14 +553,14 @@ struct mv_cesa_ablkcipher_std_req {
 };
 
 /**
- * struct mv_cesa_ablkcipher_req - cipher request
+ * struct mv_cesa_skcipher_req - cipher request
  * @req:	type specific request information
  * @src_nents:	number of entries in the src sg list
  * @dst_nents:	number of entries in the dest sg list
  */
-struct mv_cesa_ablkcipher_req {
+struct mv_cesa_skcipher_req {
 	struct mv_cesa_req base;
-	struct mv_cesa_ablkcipher_std_req std;
+	struct mv_cesa_skcipher_std_req std;
 	int src_nents;
 	int dst_nents;
 };
@@ -764,7 +765,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
 	 * the backlog and will be processed later. There's no need to
 	 * clean it up.
 	 */
-	if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+	if (ret == -EBUSY)
 		return false;
 
 	/* Request wasn't queued, we need to clean it up */
@@ -869,11 +870,11 @@ extern struct ahash_alg mv_ahmac_md5_alg;
 extern struct ahash_alg mv_ahmac_sha1_alg;
 extern struct ahash_alg mv_ahmac_sha256_alg;
 
-extern struct crypto_alg mv_cesa_ecb_des_alg;
-extern struct crypto_alg mv_cesa_cbc_des_alg;
-extern struct crypto_alg mv_cesa_ecb_des3_ede_alg;
-extern struct crypto_alg mv_cesa_cbc_des3_ede_alg;
-extern struct crypto_alg mv_cesa_ecb_aes_alg;
-extern struct crypto_alg mv_cesa_cbc_aes_alg;
+extern struct skcipher_alg mv_cesa_ecb_des_alg;
+extern struct skcipher_alg mv_cesa_cbc_des_alg;
+extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_ecb_aes_alg;
+extern struct skcipher_alg mv_cesa_cbc_aes_alg;
 
 #endif /* __MARVELL_CESA_H__ */

+ 233 - 243
drivers/crypto/marvell/cipher.c

@@ -32,23 +32,23 @@ struct mv_cesa_aes_ctx {
 	struct crypto_aes_ctx aes;
 };
 
-struct mv_cesa_ablkcipher_dma_iter {
+struct mv_cesa_skcipher_dma_iter {
 	struct mv_cesa_dma_iter base;
 	struct mv_cesa_sg_dma_iter src;
 	struct mv_cesa_sg_dma_iter dst;
 };
 
 static inline void
-mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
-				 struct ablkcipher_request *req)
+mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
+			       struct skcipher_request *req)
 {
-	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
+	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
 	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
 	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
 }
 
 static inline bool
-mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
+mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
 {
 	iter->src.op_offset = 0;
 	iter->dst.op_offset = 0;
@@ -57,9 +57,9 @@ mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
 }
 
 static inline void
-mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
+mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
 
 	if (req->dst != req->src) {
 		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
@@ -73,20 +73,20 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
 	mv_cesa_dma_cleanup(&creq->base);
 }
 
-static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
+static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-		mv_cesa_ablkcipher_dma_cleanup(req);
+		mv_cesa_skcipher_dma_cleanup(req);
 }
 
-static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
+static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
-	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
 	struct mv_cesa_engine *engine = creq->base.engine;
-	size_t  len = min_t(size_t, req->nbytes - sreq->offset,
+	size_t  len = min_t(size_t, req->cryptlen - sreq->offset,
 			    CESA_SA_SRAM_PAYLOAD_SIZE);
 
 	mv_cesa_adjust_op(engine, &sreq->op);
@@ -114,11 +114,11 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
 	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
 }
 
-static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
-					  u32 status)
+static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
+					u32 status)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
-	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
 	struct mv_cesa_engine *engine = creq->base.engine;
 	size_t len;
 
@@ -127,122 +127,130 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
 				   sreq->size, sreq->offset);
 
 	sreq->offset += len;
-	if (sreq->offset < req->nbytes)
+	if (sreq->offset < req->cryptlen)
 		return -EINPROGRESS;
 
 	return 0;
 }
 
-static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
-				      u32 status)
+static int mv_cesa_skcipher_process(struct crypto_async_request *req,
+				    u32 status)
 {
-	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
 	struct mv_cesa_req *basereq = &creq->base;
 
 	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
-		return mv_cesa_ablkcipher_std_process(ablkreq, status);
+		return mv_cesa_skcipher_std_process(skreq, status);
 
 	return mv_cesa_dma_process(basereq, status);
 }
 
-static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
+static void mv_cesa_skcipher_step(struct crypto_async_request *req)
 {
-	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
 		mv_cesa_dma_step(&creq->base);
 	else
-		mv_cesa_ablkcipher_std_step(ablkreq);
+		mv_cesa_skcipher_std_step(skreq);
 }
 
 static inline void
-mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
+mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
 	struct mv_cesa_req *basereq = &creq->base;
 
 	mv_cesa_dma_prepare(basereq, basereq->engine);
 }
 
 static inline void
-mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
+mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
-	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
 
 	sreq->size = 0;
 	sreq->offset = 0;
 }
 
-static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
-					      struct mv_cesa_engine *engine)
+static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
+					    struct mv_cesa_engine *engine)
 {
-	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
 	creq->base.engine = engine;
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-		mv_cesa_ablkcipher_dma_prepare(ablkreq);
+		mv_cesa_skcipher_dma_prepare(skreq);
 	else
-		mv_cesa_ablkcipher_std_prepare(ablkreq);
+		mv_cesa_skcipher_std_prepare(skreq);
 }
 
 static inline void
-mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
+mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
 {
-	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct skcipher_request *skreq = skcipher_request_cast(req);
 
-	mv_cesa_ablkcipher_cleanup(ablkreq);
+	mv_cesa_skcipher_cleanup(skreq);
 }
 
 static void
-mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
+mv_cesa_skcipher_complete(struct crypto_async_request *req)
 {
-	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
 	struct mv_cesa_engine *engine = creq->base.engine;
 	unsigned int ivsize;
 
-	atomic_sub(ablkreq->nbytes, &engine->load);
-	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+	atomic_sub(skreq->cryptlen, &engine->load);
+	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
 		struct mv_cesa_req *basereq;
 
 		basereq = &creq->base;
-		memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+		memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
 		       ivsize);
 	} else {
-		memcpy_fromio(ablkreq->info,
+		memcpy_fromio(skreq->iv,
 			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
 			      ivsize);
 	}
 }
 
-static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
-	.step = mv_cesa_ablkcipher_step,
-	.process = mv_cesa_ablkcipher_process,
-	.cleanup = mv_cesa_ablkcipher_req_cleanup,
-	.complete = mv_cesa_ablkcipher_complete,
+static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
+	.step = mv_cesa_skcipher_step,
+	.process = mv_cesa_skcipher_process,
+	.cleanup = mv_cesa_skcipher_req_cleanup,
+	.complete = mv_cesa_skcipher_complete,
 };
 
-static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
+static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
 {
-	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	void *ctx = crypto_tfm_ctx(tfm);
+
+	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
+}
+
+static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;
+	ctx->ops = &mv_cesa_skcipher_req_ops;
 
-	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct mv_cesa_skcipher_req));
 
 	return 0;
 }
 
-static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
 			      unsigned int len)
 {
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
 	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 	int remaining;
 	int offset;
@@ -251,7 +259,7 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 
 	ret = crypto_aes_expand_key(&ctx->aes, key, len);
 	if (ret) {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return ret;
 	}
 
@@ -264,16 +272,16 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	return 0;
 }
 
-static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
 			      unsigned int len)
 {
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
 	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
 	u32 tmp[DES_EXPKEY_WORDS];
 	int ret;
 
 	if (len != DES_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
@@ -288,14 +296,14 @@ static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	return 0;
 }
 
-static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
+static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
 				   const u8 *key, unsigned int len)
 {
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
 	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	if (len != DES3_EDE_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
@@ -304,14 +312,14 @@ static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
 	return 0;
 }
 
-static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
-				const struct mv_cesa_op_ctx *op_templ)
+static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
+					 const struct mv_cesa_op_ctx *op_templ)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	struct mv_cesa_req *basereq = &creq->base;
-	struct mv_cesa_ablkcipher_dma_iter iter;
+	struct mv_cesa_skcipher_dma_iter iter;
 	bool skip_ctx = false;
 	int ret;
 	unsigned int ivsize;
@@ -339,7 +347,7 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 	}
 
 	mv_cesa_tdma_desc_iter_init(&basereq->chain);
-	mv_cesa_ablkcipher_req_iter_init(&iter, req);
+	mv_cesa_skcipher_req_iter_init(&iter, req);
 
 	do {
 		struct mv_cesa_op_ctx *op;
@@ -370,10 +378,10 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 		if (ret)
 			goto err_free_tdma;
 
-	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
+	} while (mv_cesa_skcipher_req_iter_next_op(&iter));
 
 	/* Add output data for IV */
-	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
 	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
 				    CESA_SA_DATA_SRAM_OFFSET,
 				    CESA_TDMA_SRC_IN_SRAM, flags);
@@ -399,11 +407,11 @@ err_unmap_src:
 }
 
 static inline int
-mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
-				const struct mv_cesa_op_ctx *op_templ)
+mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
+			      const struct mv_cesa_op_ctx *op_templ)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
-	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
 	struct mv_cesa_req *basereq = &creq->base;
 
 	sreq->op = *op_templ;
@@ -414,23 +422,23 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
 	return 0;
 }
 
-static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
-				       struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
+				     struct mv_cesa_op_ctx *tmpl)
 {
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	unsigned int blksize = crypto_skcipher_blocksize(tfm);
 	int ret;
 
-	if (!IS_ALIGNED(req->nbytes, blksize))
+	if (!IS_ALIGNED(req->cryptlen, blksize))
 		return -EINVAL;
 
-	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
 	if (creq->src_nents < 0) {
 		dev_err(cesa_dev->dev, "Invalid number of src SG");
 		return creq->src_nents;
 	}
-	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
 	if (creq->dst_nents < 0) {
 		dev_err(cesa_dev->dev, "Invalid number of dst SG");
 		return creq->dst_nents;
@@ -440,36 +448,36 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
 			      CESA_SA_DESC_CFG_OP_MSK);
 
 	if (cesa_dev->caps->has_tdma)
-		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
+		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
 	else
-		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);
+		ret = mv_cesa_skcipher_std_req_init(req, tmpl);
 
 	return ret;
 }
 
-static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
-					struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
+				      struct mv_cesa_op_ctx *tmpl)
 {
 	int ret;
-	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
 	struct mv_cesa_engine *engine;
 
-	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
+	ret = mv_cesa_skcipher_req_init(req, tmpl);
 	if (ret)
 		return ret;
 
-	engine = mv_cesa_select_engine(req->nbytes);
-	mv_cesa_ablkcipher_prepare(&req->base, engine);
+	engine = mv_cesa_select_engine(req->cryptlen);
+	mv_cesa_skcipher_prepare(&req->base, engine);
 
 	ret = mv_cesa_queue_req(&req->base, &creq->base);
 
 	if (mv_cesa_req_needs_cleanup(&req->base, ret))
-		mv_cesa_ablkcipher_cleanup(req);
+		mv_cesa_skcipher_cleanup(req);
 
 	return ret;
 }
 
-static int mv_cesa_des_op(struct ablkcipher_request *req,
+static int mv_cesa_des_op(struct skcipher_request *req,
 			  struct mv_cesa_op_ctx *tmpl)
 {
 	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -479,10 +487,10 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
 
 	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
 
-	return mv_cesa_ablkcipher_queue_req(req, tmpl);
+	return mv_cesa_skcipher_queue_req(req, tmpl);
 }
 
-static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -493,7 +501,7 @@ static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
 	return mv_cesa_des_op(req, &tmpl);
 }
 
-static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -504,41 +512,38 @@ static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
 	return mv_cesa_des_op(req, &tmpl);
 }
 
-struct crypto_alg mv_cesa_ecb_des_alg = {
-	.cra_name = "ecb(des)",
-	.cra_driver_name = "mv-ecb-des",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = DES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = mv_cesa_ablkcipher_cra_init,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = DES_KEY_SIZE,
-			.max_keysize = DES_KEY_SIZE,
-			.setkey = mv_cesa_des_setkey,
-			.encrypt = mv_cesa_ecb_des_encrypt,
-			.decrypt = mv_cesa_ecb_des_decrypt,
-		},
+struct skcipher_alg mv_cesa_ecb_des_alg = {
+	.setkey = mv_cesa_des_setkey,
+	.encrypt = mv_cesa_ecb_des_encrypt,
+	.decrypt = mv_cesa_ecb_des_decrypt,
+	.min_keysize = DES_KEY_SIZE,
+	.max_keysize = DES_KEY_SIZE,
+	.base = {
+		.cra_name = "ecb(des)",
+		.cra_driver_name = "mv-ecb-des",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
 	},
 };
 
-static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_des_op(struct skcipher_request *req,
 			      struct mv_cesa_op_ctx *tmpl)
 {
 	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
 			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
 
-	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);
+	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
 
 	return mv_cesa_des_op(req, tmpl);
 }
 
-static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -547,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
 	return mv_cesa_cbc_des_op(req, &tmpl);
 }
 
-static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -556,31 +561,28 @@ static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
 	return mv_cesa_cbc_des_op(req, &tmpl);
 }
 
-struct crypto_alg mv_cesa_cbc_des_alg = {
-	.cra_name = "cbc(des)",
-	.cra_driver_name = "mv-cbc-des",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = DES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = mv_cesa_ablkcipher_cra_init,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = DES_KEY_SIZE,
-			.max_keysize = DES_KEY_SIZE,
-			.ivsize	     = DES_BLOCK_SIZE,
-			.setkey = mv_cesa_des_setkey,
-			.encrypt = mv_cesa_cbc_des_encrypt,
-			.decrypt = mv_cesa_cbc_des_decrypt,
-		},
+struct skcipher_alg mv_cesa_cbc_des_alg = {
+	.setkey = mv_cesa_des_setkey,
+	.encrypt = mv_cesa_cbc_des_encrypt,
+	.decrypt = mv_cesa_cbc_des_decrypt,
+	.min_keysize = DES_KEY_SIZE,
+	.max_keysize = DES_KEY_SIZE,
+	.ivsize = DES_BLOCK_SIZE,
+	.base = {
+		.cra_name = "cbc(des)",
+		.cra_driver_name = "mv-cbc-des",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
 	},
 };
 
-static int mv_cesa_des3_op(struct ablkcipher_request *req,
+static int mv_cesa_des3_op(struct skcipher_request *req,
 			   struct mv_cesa_op_ctx *tmpl)
 {
 	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -590,10 +592,10 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
 
 	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
 
-	return mv_cesa_ablkcipher_queue_req(req, tmpl);
+	return mv_cesa_skcipher_queue_req(req, tmpl);
 }
 
-static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -605,7 +607,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
 	return mv_cesa_des3_op(req, &tmpl);
 }
 
-static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -617,39 +619,36 @@ static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
 	return mv_cesa_des3_op(req, &tmpl);
 }
 
-struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
-	.cra_name = "ecb(des3_ede)",
-	.cra_driver_name = "mv-ecb-des3-ede",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = mv_cesa_ablkcipher_cra_init,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = DES3_EDE_KEY_SIZE,
-			.max_keysize = DES3_EDE_KEY_SIZE,
-			.ivsize	     = DES3_EDE_BLOCK_SIZE,
-			.setkey = mv_cesa_des3_ede_setkey,
-			.encrypt = mv_cesa_ecb_des3_ede_encrypt,
-			.decrypt = mv_cesa_ecb_des3_ede_decrypt,
-		},
+struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
+	.setkey = mv_cesa_des3_ede_setkey,
+	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
+	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
+	.min_keysize = DES3_EDE_KEY_SIZE,
+	.max_keysize = DES3_EDE_KEY_SIZE,
+	.ivsize = DES3_EDE_BLOCK_SIZE,
+	.base = {
+		.cra_name = "ecb(des3_ede)",
+		.cra_driver_name = "mv-ecb-des3-ede",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
 	},
 };
 
-static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
 			       struct mv_cesa_op_ctx *tmpl)
 {
-	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);
+	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
 
 	return mv_cesa_des3_op(req, tmpl);
 }
 
-static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -661,7 +660,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
 	return mv_cesa_cbc_des3_op(req, &tmpl);
 }
 
-static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -673,31 +672,28 @@ static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
 	return mv_cesa_cbc_des3_op(req, &tmpl);
 }
 
-struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
-	.cra_name = "cbc(des3_ede)",
-	.cra_driver_name = "mv-cbc-des3-ede",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = mv_cesa_ablkcipher_cra_init,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = DES3_EDE_KEY_SIZE,
-			.max_keysize = DES3_EDE_KEY_SIZE,
-			.ivsize	     = DES3_EDE_BLOCK_SIZE,
-			.setkey = mv_cesa_des3_ede_setkey,
-			.encrypt = mv_cesa_cbc_des3_ede_encrypt,
-			.decrypt = mv_cesa_cbc_des3_ede_decrypt,
-		},
+struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
+	.setkey = mv_cesa_des3_ede_setkey,
+	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
+	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
+	.min_keysize = DES3_EDE_KEY_SIZE,
+	.max_keysize = DES3_EDE_KEY_SIZE,
+	.ivsize = DES3_EDE_BLOCK_SIZE,
+	.base = {
+		.cra_name = "cbc(des3_ede)",
+		.cra_driver_name = "mv-cbc-des3-ede",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
 	},
 };
 
-static int mv_cesa_aes_op(struct ablkcipher_request *req,
+static int mv_cesa_aes_op(struct skcipher_request *req,
 			  struct mv_cesa_op_ctx *tmpl)
 {
 	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -724,10 +720,10 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
 			      CESA_SA_DESC_CFG_CRYPTM_MSK |
 			      CESA_SA_DESC_CFG_AES_LEN_MSK);
 
-	return mv_cesa_ablkcipher_queue_req(req, tmpl);
+	return mv_cesa_skcipher_queue_req(req, tmpl);
 }
 
-static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -738,7 +734,7 @@ static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
 	return mv_cesa_aes_op(req, &tmpl);
 }
 
-static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -749,40 +745,37 @@ static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
 	return mv_cesa_aes_op(req, &tmpl);
 }
 
-struct crypto_alg mv_cesa_ecb_aes_alg = {
-	.cra_name = "ecb(aes)",
-	.cra_driver_name = "mv-ecb-aes",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = mv_cesa_ablkcipher_cra_init,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.setkey = mv_cesa_aes_setkey,
-			.encrypt = mv_cesa_ecb_aes_encrypt,
-			.decrypt = mv_cesa_ecb_aes_decrypt,
-		},
+struct skcipher_alg mv_cesa_ecb_aes_alg = {
+	.setkey = mv_cesa_aes_setkey,
+	.encrypt = mv_cesa_ecb_aes_encrypt,
+	.decrypt = mv_cesa_ecb_aes_decrypt,
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.base = {
+		.cra_name = "ecb(aes)",
+		.cra_driver_name = "mv-ecb-aes",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
 	},
 };
 
-static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
 			      struct mv_cesa_op_ctx *tmpl)
 {
 	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
 			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
-	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);
+	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
 
 	return mv_cesa_aes_op(req, tmpl);
 }
 
-static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -791,7 +784,7 @@ static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
 	return mv_cesa_cbc_aes_op(req, &tmpl);
 }
 
-static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
 {
 	struct mv_cesa_op_ctx tmpl;
 
@@ -800,26 +793,23 @@ static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
 	return mv_cesa_cbc_aes_op(req, &tmpl);
 }
 
-struct crypto_alg mv_cesa_cbc_aes_alg = {
-	.cra_name = "cbc(aes)",
-	.cra_driver_name = "mv-cbc-aes",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = mv_cesa_ablkcipher_cra_init,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = mv_cesa_aes_setkey,
-			.encrypt = mv_cesa_cbc_aes_encrypt,
-			.decrypt = mv_cesa_cbc_aes_decrypt,
-		},
+struct skcipher_alg mv_cesa_cbc_aes_alg = {
+	.setkey = mv_cesa_aes_setkey,
+	.encrypt = mv_cesa_cbc_aes_encrypt,
+	.decrypt = mv_cesa_cbc_aes_decrypt,
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.base = {
+		.cra_name = "cbc(aes)",
+		.cra_driver_name = "mv-cbc-aes",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
 	},
 };

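One behavioural addition in the ablkcipher-to-skcipher conversion above is the new mv_cesa_skcipher_cra_exit(), which wipes the transform context on teardown. memzero_explicit() matters here: a plain memset() on memory about to be released can legally be elided as a dead store, leaving expanded key material behind. A minimal sketch of the same hook (hypothetical name, using the crypto_tfm_alg_ctxsize() accessor instead of poking __crt_alg directly):

#include <linux/crypto.h>
#include <linux/string.h>

static void demo_cra_exit(struct crypto_tfm *tfm)
{
	/* The barrier inside memzero_explicit() keeps the stores alive. */
	memzero_explicit(crypto_tfm_ctx(tfm), crypto_tfm_alg_ctxsize(tfm));
}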
+ 1 - 4
drivers/crypto/marvell/tdma.c

@@ -304,10 +304,7 @@ int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
 	struct mv_cesa_tdma_desc *tdma;
 
 	tdma = mv_cesa_dma_add_desc(chain, flags);
-	if (IS_ERR(tdma))
-		return PTR_ERR(tdma);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(tdma);
 }
 
 int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
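PTR_ERR_OR_ZERO() is the stock helper for exactly this "error pointer or success" return pattern. Its definition in include/linux/err.h is, near enough:

	static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		else
			return 0;
	}

so the one-liner is behaviour-preserving while dropping three lines of open-coded equivalent.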

+ 12 - 27
drivers/crypto/mediatek/mtk-aes.c

@@ -13,6 +13,7 @@
  */
 
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include "mtk-platform.h"
 
 #define AES_QUEUE_SIZE		512
@@ -137,11 +138,6 @@ struct mtk_aes_gcm_ctx {
 	struct crypto_skcipher *ctr;
 };
 
-struct mtk_aes_gcm_setkey_result {
-	int err;
-	struct completion completion;
-};
-
 struct mtk_aes_drv {
 	struct list_head dev_list;
 	/* Device list lock */
@@ -928,25 +924,19 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
 static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
 {
 	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
 	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
 
+	/* Empty messages are not supported yet */
+	if (!gctx->textlen && !req->assoclen)
+		return -EINVAL;
+
 	rctx->mode = AES_FLAGS_GCM | mode;
 
 	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
 				    &req->base);
 }
 
-static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
-	struct mtk_aes_gcm_setkey_result *result = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	result->err = err;
-	complete(&result->completion);
-}
-
 /*
  * Because of the hardware limitation, we need to pre-calculate key(H)
  * for the GHASH operation. The result of the encryption operation
@@ -962,7 +952,7 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 		u32 hash[4];
 		u8 iv[8];
 
-		struct mtk_aes_gcm_setkey_result result;
+		struct crypto_wait wait;
 
 		struct scatterlist sg[1];
 		struct skcipher_request req;
@@ -1002,22 +992,17 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	if (!data)
 		return -ENOMEM;
 
-	init_completion(&data->result.completion);
+	crypto_init_wait(&data->wait);
 	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
 	skcipher_request_set_tfm(&data->req, ctr);
 	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
 				      CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      mtk_gcm_setkey_done, &data->result);
+				      crypto_req_done, &data->wait);
 	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
 				   AES_BLOCK_SIZE, data->iv);
 
-	err = crypto_skcipher_encrypt(&data->req);
-	if (err == -EINPROGRESS || err == -EBUSY) {
-		err = wait_for_completion_interruptible(
-			&data->result.completion);
-		if (!err)
-			err = data->result.err;
-	}
+	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+			      &data->wait);
 	if (err)
 		goto out;
 
@@ -1098,7 +1083,7 @@ static struct aead_alg aes_gcm_alg = {
 	.decrypt	= mtk_aes_gcm_decrypt,
 	.init		= mtk_aes_gcm_init,
 	.exit		= mtk_aes_gcm_exit,
-	.ivsize		= 12,
+	.ivsize		= GCM_AES_IV_SIZE,
 	.maxauthsize	= AES_BLOCK_SIZE,
 
 	.base = {
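The old setkey path open-coded a completion and waited with wait_for_completion_interruptible(), so a signal could abandon a request that the CTR fallback was still processing. The generic crypto_wait helpers replace that; their behaviour, roughly as defined in include/linux/crypto.h (paraphrased, not a verbatim copy):

	/* Completion callback: stash the status and wake the waiter. */
	void crypto_req_done(struct crypto_async_request *req, int err)
	{
		struct crypto_wait *wait = req->data;

		if (err == -EINPROGRESS)
			return;	/* backlogged request has started; keep waiting */

		wait->err = err;
		complete(&wait->completion);
	}

	/* Turn -EINPROGRESS/-EBUSY into an uninterruptible sleep, then
	 * report the request's final status. */
	static inline int crypto_wait_req(int err, struct crypto_wait *wait)
	{
		switch (err) {
		case -EINPROGRESS:
		case -EBUSY:
			wait_for_completion(&wait->completion);
			reinit_completion(&wait->completion);
			err = wait->err;
			break;
		}
		return err;
	}

This also lets the driver delete its private mtk_aes_gcm_setkey_result structure, since struct crypto_wait carries the same state generically.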

+ 0 - 1216
drivers/crypto/mv_cesa.c

@@ -1,1216 +0,0 @@
-/*
- * Support for Marvell's crypto engine which can be found on some Orion5X
- * boards.
- *
- * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
- * License: GPLv2
- *
- */
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <linux/crypto.h>
-#include <linux/genalloc.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kthread.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-
-#include "mv_cesa.h"
-
-#define MV_CESA	"MV-CESA:"
-#define MAX_HW_HASH_SIZE	0xFFFF
-#define MV_CESA_EXPIRE		500 /* msec */
-
-#define MV_CESA_DEFAULT_SRAM_SIZE	2048
-
-/*
- * STM:
- *   /---------------------------------------\
- *   |					     | request complete
- *  \./					     |
- * IDLE -> new request -> BUSY -> done -> DEQUEUE
- *                         /°\               |
- *			    |		     | more scatter entries
- *			    \________________/
- */
-enum engine_status {
-	ENGINE_IDLE,
-	ENGINE_BUSY,
-	ENGINE_W_DEQUEUE,
-};
-
-/**
- * struct req_progress - used for every crypt request
- * @src_sg_it:		sg iterator for src
- * @dst_sg_it:		sg iterator for dst
- * @sg_src_left:	bytes left in src to process (scatter list)
- * @src_start:		offset to add to src start position (scatter list)
- * @crypt_len:		length of current hw crypt/hash process
- * @hw_nbytes:		total bytes to process in hw for this request
- * @copy_back:		whether to copy data back (crypt) or not (hash)
- * @sg_dst_left:	bytes left dst to process in this scatter list
- * @dst_start:		offset to add to dst start position (scatter list)
- * @hw_processed_bytes:	number of bytes processed by hw (request).
- *
- * sg helper are used to iterate over the scatterlist. Since the size of the
- * SRAM may be less than the scatter size, this struct struct is used to keep
- * track of progress within current scatterlist.
- */
-struct req_progress {
-	struct sg_mapping_iter src_sg_it;
-	struct sg_mapping_iter dst_sg_it;
-	void (*complete) (void);
-	void (*process) (int is_first);
-
-	/* src mostly */
-	int sg_src_left;
-	int src_start;
-	int crypt_len;
-	int hw_nbytes;
-	/* dst mostly */
-	int copy_back;
-	int sg_dst_left;
-	int dst_start;
-	int hw_processed_bytes;
-};
-
-struct crypto_priv {
-	void __iomem *reg;
-	void __iomem *sram;
-	struct gen_pool *sram_pool;
-	dma_addr_t sram_dma;
-	int irq;
-	struct clk *clk;
-	struct task_struct *queue_th;
-
-	/* the lock protects queue and eng_st */
-	spinlock_t lock;
-	struct crypto_queue queue;
-	enum engine_status eng_st;
-	struct timer_list completion_timer;
-	struct crypto_async_request *cur_req;
-	struct req_progress p;
-	int max_req_size;
-	int sram_size;
-	int has_sha1;
-	int has_hmac_sha1;
-};
-
-static struct crypto_priv *cpg;
-
-struct mv_ctx {
-	u8 aes_enc_key[AES_KEY_LEN];
-	u32 aes_dec_key[8];
-	int key_len;
-	u32 need_calc_aes_dkey;
-};
-
-enum crypto_op {
-	COP_AES_ECB,
-	COP_AES_CBC,
-};
-
-struct mv_req_ctx {
-	enum crypto_op op;
-	int decrypt;
-};
-
-enum hash_op {
-	COP_SHA1,
-	COP_HMAC_SHA1
-};
-
-struct mv_tfm_hash_ctx {
-	struct crypto_shash *fallback;
-	struct crypto_shash *base_hash;
-	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
-	int count_add;
-	enum hash_op op;
-};
-
-struct mv_req_hash_ctx {
-	u64 count;
-	u32 state[SHA1_DIGEST_SIZE / 4];
-	u8 buffer[SHA1_BLOCK_SIZE];
-	int first_hash;		/* marks that we don't have previous state */
-	int last_chunk;		/* marks that this is the 'final' request */
-	int extra_bytes;	/* unprocessed bytes in buffer */
-	enum hash_op op;
-	int count_add;
-};
-
-static void mv_completion_timer_callback(struct timer_list *unused)
-{
-	int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
-
-	printk(KERN_ERR MV_CESA
-	       "completion timer expired (CESA %sactive), cleaning up.\n",
-	       active ? "" : "in");
-
-	del_timer(&cpg->completion_timer);
-	writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
-	while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
-		printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__);
-	cpg->eng_st = ENGINE_W_DEQUEUE;
-	wake_up_process(cpg->queue_th);
-}
-
-static void mv_setup_timer(void)
-{
-	timer_setup(&cpg->completion_timer, mv_completion_timer_callback, 0);
-	mod_timer(&cpg->completion_timer,
-			jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
-}
-
-static void compute_aes_dec_key(struct mv_ctx *ctx)
-{
-	struct crypto_aes_ctx gen_aes_key;
-	int key_pos;
-
-	if (!ctx->need_calc_aes_dkey)
-		return;
-
-	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
-
-	key_pos = ctx->key_len + 24;
-	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
-	switch (ctx->key_len) {
-	case AES_KEYSIZE_256:
-		key_pos -= 2;
-		/* fall */
-	case AES_KEYSIZE_192:
-		key_pos -= 2;
-		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
-				4 * 4);
-		break;
-	}
-	ctx->need_calc_aes_dkey = 0;
-}
-
-static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
-		unsigned int len)
-{
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
-	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	switch (len) {
-	case AES_KEYSIZE_128:
-	case AES_KEYSIZE_192:
-	case AES_KEYSIZE_256:
-		break;
-	default:
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-	ctx->key_len = len;
-	ctx->need_calc_aes_dkey = 1;
-
-	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
-	return 0;
-}
-
-static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
-{
-	int ret;
-	void *sbuf;
-	int copy_len;
-
-	while (len) {
-		if (!p->sg_src_left) {
-			ret = sg_miter_next(&p->src_sg_it);
-			BUG_ON(!ret);
-			p->sg_src_left = p->src_sg_it.length;
-			p->src_start = 0;
-		}
-
-		sbuf = p->src_sg_it.addr + p->src_start;
-
-		copy_len = min(p->sg_src_left, len);
-		memcpy(dbuf, sbuf, copy_len);
-
-		p->src_start += copy_len;
-		p->sg_src_left -= copy_len;
-
-		len -= copy_len;
-		dbuf += copy_len;
-	}
-}
-
-static void setup_data_in(void)
-{
-	struct req_progress *p = &cpg->p;
-	int data_in_sram =
-	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
-	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
-			data_in_sram - p->crypt_len);
-	p->crypt_len = data_in_sram;
-}
-
-static void mv_process_current_q(int first_block)
-{
-	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
-	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-	struct sec_accel_config op;
-
-	switch (req_ctx->op) {
-	case COP_AES_ECB:
-		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
-		break;
-	case COP_AES_CBC:
-	default:
-		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
-		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
-			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
-		if (first_block)
-			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
-		break;
-	}
-	if (req_ctx->decrypt) {
-		op.config |= CFG_DIR_DEC;
-		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
-				AES_KEY_LEN);
-	} else {
-		op.config |= CFG_DIR_ENC;
-		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
-				AES_KEY_LEN);
-	}
-
-	switch (ctx->key_len) {
-	case AES_KEYSIZE_128:
-		op.config |= CFG_AES_LEN_128;
-		break;
-	case AES_KEYSIZE_192:
-		op.config |= CFG_AES_LEN_192;
-		break;
-	case AES_KEYSIZE_256:
-		op.config |= CFG_AES_LEN_256;
-		break;
-	}
-	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
-		ENC_P_DST(SRAM_DATA_OUT_START);
-	op.enc_key_p = SRAM_DATA_KEY_P;
-
-	setup_data_in();
-	op.enc_len = cpg->p.crypt_len;
-	memcpy(cpg->sram + SRAM_CONFIG, &op,
-			sizeof(struct sec_accel_config));
-
-	/* GO */
-	mv_setup_timer();
-	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-}
-
-static void mv_crypto_algo_completion(void)
-{
-	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
-	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
-	sg_miter_stop(&cpg->p.src_sg_it);
-	sg_miter_stop(&cpg->p.dst_sg_it);
-
-	if (req_ctx->op != COP_AES_CBC)
-		return ;
-
-	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
-}
-
-static void mv_process_hash_current(int first_block)
-{
-	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
-	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
-	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
-	struct req_progress *p = &cpg->p;
-	struct sec_accel_config op = { 0 };
-	int is_last;
-
-	switch (req_ctx->op) {
-	case COP_SHA1:
-	default:
-		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
-		break;
-	case COP_HMAC_SHA1:
-		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
-		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
-				tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
-		break;
-	}
-
-	op.mac_src_p =
-		MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
-		req_ctx->
-		count);
-
-	setup_data_in();
-
-	op.mac_digest =
-		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
-	op.mac_iv =
-		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
-		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
-
-	is_last = req_ctx->last_chunk
-		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
-		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
-	if (req_ctx->first_hash) {
-		if (is_last)
-			op.config |= CFG_NOT_FRAG;
-		else
-			op.config |= CFG_FIRST_FRAG;
-
-		req_ctx->first_hash = 0;
-	} else {
-		if (is_last)
-			op.config |= CFG_LAST_FRAG;
-		else
-			op.config |= CFG_MID_FRAG;
-
-		if (first_block) {
-			writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-			writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-			writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-			writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-			writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
-		}
-	}
-
-	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
-
-	/* GO */
-	mv_setup_timer();
-	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-}
-
-static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
-					  struct shash_desc *desc)
-{
-	int i;
-	struct sha1_state shash_state;
-
-	shash_state.count = ctx->count + ctx->count_add;
-	for (i = 0; i < 5; i++)
-		shash_state.state[i] = ctx->state[i];
-	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
-	return crypto_shash_import(desc, &shash_state);
-}
-
-static int mv_hash_final_fallback(struct ahash_request *req)
-{
-	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
-	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
-	SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback);
-	int rc;
-
-	shash->tfm = tfm_ctx->fallback;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-	if (unlikely(req_ctx->first_hash)) {
-		crypto_shash_init(shash);
-		crypto_shash_update(shash, req_ctx->buffer,
-				    req_ctx->extra_bytes);
-	} else {
-		/* only SHA1 for now....
-		 */
-		rc = mv_hash_import_sha1_ctx(req_ctx, shash);
-		if (rc)
-			goto out;
-	}
-	rc = crypto_shash_final(shash, req->result);
-out:
-	return rc;
-}
-
-static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
-{
-	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
-	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
-	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
-	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
-	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-}
-
-static void mv_hash_algo_completion(void)
-{
-	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
-	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-
-	if (ctx->extra_bytes)
-		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
-	sg_miter_stop(&cpg->p.src_sg_it);
-
-	if (likely(ctx->last_chunk)) {
-		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
-			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
-			       crypto_ahash_digestsize(crypto_ahash_reqtfm
-						       (req)));
-		} else {
-			mv_save_digest_state(ctx);
-			mv_hash_final_fallback(req);
-		}
-	} else {
-		mv_save_digest_state(ctx);
-	}
-}
-
-static void dequeue_complete_req(void)
-{
-	struct crypto_async_request *req = cpg->cur_req;
-	void *buf;
-	int ret;
-	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
-	if (cpg->p.copy_back) {
-		int need_copy_len = cpg->p.crypt_len;
-		int sram_offset = 0;
-		do {
-			int dst_copy;
-
-			if (!cpg->p.sg_dst_left) {
-				ret = sg_miter_next(&cpg->p.dst_sg_it);
-				BUG_ON(!ret);
-				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
-				cpg->p.dst_start = 0;
-			}
-
-			buf = cpg->p.dst_sg_it.addr;
-			buf += cpg->p.dst_start;
-
-			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
-
-			memcpy(buf,
-			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
-			       dst_copy);
-			sram_offset += dst_copy;
-			cpg->p.sg_dst_left -= dst_copy;
-			need_copy_len -= dst_copy;
-			cpg->p.dst_start += dst_copy;
-		} while (need_copy_len > 0);
-	}
-
-	cpg->p.crypt_len = 0;
-
-	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
-	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
-		/* process next scatter list entry */
-		cpg->eng_st = ENGINE_BUSY;
-		cpg->p.process(0);
-	} else {
-		cpg->p.complete();
-		cpg->eng_st = ENGINE_IDLE;
-		local_bh_disable();
-		req->complete(req, 0);
-		local_bh_enable();
-	}
-}
-
-static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
-{
-	int i = 0;
-	size_t cur_len;
-
-	while (sl) {
-		cur_len = sl[i].length;
-		++i;
-		if (total_bytes > cur_len)
-			total_bytes -= cur_len;
-		else
-			break;
-	}
-
-	return i;
-}
-
-static void mv_start_new_crypt_req(struct ablkcipher_request *req)
-{
-	struct req_progress *p = &cpg->p;
-	int num_sgs;
-
-	cpg->cur_req = &req->base;
-	memset(p, 0, sizeof(struct req_progress));
-	p->hw_nbytes = req->nbytes;
-	p->complete = mv_crypto_algo_completion;
-	p->process = mv_process_current_q;
-	p->copy_back = 1;
-
-	num_sgs = count_sgs(req->src, req->nbytes);
-	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
-
-	num_sgs = count_sgs(req->dst, req->nbytes);
-	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
-
-	mv_process_current_q(1);
-}
-
-static void mv_start_new_hash_req(struct ahash_request *req)
-{
-	struct req_progress *p = &cpg->p;
-	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-	int num_sgs, hw_bytes, old_extra_bytes, rc;
-	cpg->cur_req = &req->base;
-	memset(p, 0, sizeof(struct req_progress));
-	hw_bytes = req->nbytes + ctx->extra_bytes;
-	old_extra_bytes = ctx->extra_bytes;
-
-	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
-	if (ctx->extra_bytes != 0
-	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
-		hw_bytes -= ctx->extra_bytes;
-	else
-		ctx->extra_bytes = 0;
-
-	num_sgs = count_sgs(req->src, req->nbytes);
-	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
-
-	if (hw_bytes) {
-		p->hw_nbytes = hw_bytes;
-		p->complete = mv_hash_algo_completion;
-		p->process = mv_process_hash_current;
-
-		if (unlikely(old_extra_bytes)) {
-			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
-			       old_extra_bytes);
-			p->crypt_len = old_extra_bytes;
-		}
-
-		mv_process_hash_current(1);
-	} else {
-		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
-				ctx->extra_bytes - old_extra_bytes);
-		sg_miter_stop(&p->src_sg_it);
-		if (ctx->last_chunk)
-			rc = mv_hash_final_fallback(req);
-		else
-			rc = 0;
-		cpg->eng_st = ENGINE_IDLE;
-		local_bh_disable();
-		req->base.complete(&req->base, rc);
-		local_bh_enable();
-	}
-}
-
-static int queue_manag(void *data)
-{
-	cpg->eng_st = ENGINE_IDLE;
-	do {
-		struct crypto_async_request *async_req = NULL;
-		struct crypto_async_request *backlog = NULL;
-
-		__set_current_state(TASK_INTERRUPTIBLE);
-
-		if (cpg->eng_st == ENGINE_W_DEQUEUE)
-			dequeue_complete_req();
-
-		spin_lock_irq(&cpg->lock);
-		if (cpg->eng_st == ENGINE_IDLE) {
-			backlog = crypto_get_backlog(&cpg->queue);
-			async_req = crypto_dequeue_request(&cpg->queue);
-			if (async_req) {
-				BUG_ON(cpg->eng_st != ENGINE_IDLE);
-				cpg->eng_st = ENGINE_BUSY;
-			}
-		}
-		spin_unlock_irq(&cpg->lock);
-
-		if (backlog) {
-			backlog->complete(backlog, -EINPROGRESS);
-			backlog = NULL;
-		}
-
-		if (async_req) {
-			if (crypto_tfm_alg_type(async_req->tfm) !=
-			    CRYPTO_ALG_TYPE_AHASH) {
-				struct ablkcipher_request *req =
-				    ablkcipher_request_cast(async_req);
-				mv_start_new_crypt_req(req);
-			} else {
-				struct ahash_request *req =
-				    ahash_request_cast(async_req);
-				mv_start_new_hash_req(req);
-			}
-			async_req = NULL;
-		}
-
-		schedule();
-
-	} while (!kthread_should_stop());
-	return 0;
-}
-
-static int mv_handle_req(struct crypto_async_request *req)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&cpg->lock, flags);
-	ret = crypto_enqueue_request(&cpg->queue, req);
-	spin_unlock_irqrestore(&cpg->lock, flags);
-	wake_up_process(cpg->queue_th);
-	return ret;
-}
-
-static int mv_enc_aes_ecb(struct ablkcipher_request *req)
-{
-	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
-	req_ctx->op = COP_AES_ECB;
-	req_ctx->decrypt = 0;
-
-	return mv_handle_req(&req->base);
-}
-
-static int mv_dec_aes_ecb(struct ablkcipher_request *req)
-{
-	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
-	req_ctx->op = COP_AES_ECB;
-	req_ctx->decrypt = 1;
-
-	compute_aes_dec_key(ctx);
-	return mv_handle_req(&req->base);
-}
-
-static int mv_enc_aes_cbc(struct ablkcipher_request *req)
-{
-	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
-	req_ctx->op = COP_AES_CBC;
-	req_ctx->decrypt = 0;
-
-	return mv_handle_req(&req->base);
-}
-
-static int mv_dec_aes_cbc(struct ablkcipher_request *req)
-{
-	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
-	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
-	req_ctx->op = COP_AES_CBC;
-	req_ctx->decrypt = 1;
-
-	compute_aes_dec_key(ctx);
-	return mv_handle_req(&req->base);
-}
-
-static int mv_cra_init(struct crypto_tfm *tfm)
-{
-	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
-	return 0;
-}
-
-static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
-				 int is_last, unsigned int req_len,
-				 int count_add)
-{
-	memset(ctx, 0, sizeof(*ctx));
-	ctx->op = op;
-	ctx->count = req_len;
-	ctx->first_hash = 1;
-	ctx->last_chunk = is_last;
-	ctx->count_add = count_add;
-}
-
-static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
-				   unsigned req_len)
-{
-	ctx->last_chunk = is_last;
-	ctx->count += req_len;
-}
-
-static int mv_hash_init(struct ahash_request *req)
-{
-	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
-	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
-			     tfm_ctx->count_add);
-	return 0;
-}
-
-static int mv_hash_update(struct ahash_request *req)
-{
-	if (!req->nbytes)
-		return 0;
-
-	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
-	return mv_handle_req(&req->base);
-}
-
-static int mv_hash_final(struct ahash_request *req)
-{
-	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-
-	ahash_request_set_crypt(req, NULL, req->result, 0);
-	mv_update_hash_req_ctx(ctx, 1, 0);
-	return mv_handle_req(&req->base);
-}
-
-static int mv_hash_finup(struct ahash_request *req)
-{
-	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
-	return mv_handle_req(&req->base);
-}
-
-static int mv_hash_digest(struct ahash_request *req)
-{
-	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
-	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
-			     req->nbytes, tfm_ctx->count_add);
-	return mv_handle_req(&req->base);
-}
-
-static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
-			     const void *ostate)
-{
-	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
-	int i;
-	for (i = 0; i < 5; i++) {
-		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
-		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
-	}
-}
-
-static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
-			  unsigned int keylen)
-{
-	int rc;
-	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
-	int bs, ds, ss;
-
-	if (!ctx->base_hash)
-		return 0;
-
-	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
-	if (rc)
-		return rc;
-
-	/* Can't see a way to extract the ipad/opad from the fallback tfm
-	   so I'm basically copying code from the hmac module */
-	bs = crypto_shash_blocksize(ctx->base_hash);
-	ds = crypto_shash_digestsize(ctx->base_hash);
-	ss = crypto_shash_statesize(ctx->base_hash);
-
-	{
-		SHASH_DESC_ON_STACK(shash, ctx->base_hash);
-
-		unsigned int i;
-		char ipad[ss];
-		char opad[ss];
-
-		shash->tfm = ctx->base_hash;
-		shash->flags = crypto_shash_get_flags(ctx->base_hash) &
-		    CRYPTO_TFM_REQ_MAY_SLEEP;
-
-		if (keylen > bs) {
-			int err;
-
-			err =
-			    crypto_shash_digest(shash, key, keylen, ipad);
-			if (err)
-				return err;
-
-			keylen = ds;
-		} else
-			memcpy(ipad, key, keylen);
-
-		memset(ipad + keylen, 0, bs - keylen);
-		memcpy(opad, ipad, bs);
-
-		for (i = 0; i < bs; i++) {
-			ipad[i] ^= HMAC_IPAD_VALUE;
-			opad[i] ^= HMAC_OPAD_VALUE;
-		}
-
-		rc = crypto_shash_init(shash) ? :
-		    crypto_shash_update(shash, ipad, bs) ? :
-		    crypto_shash_export(shash, ipad) ? :
-		    crypto_shash_init(shash) ? :
-		    crypto_shash_update(shash, opad, bs) ? :
-		    crypto_shash_export(shash, opad);
-
-		if (rc == 0)
-			mv_hash_init_ivs(ctx, ipad, opad);
-
-		return rc;
-	}
-}
-
-static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
-			    enum hash_op op, int count_add)
-{
-	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
-	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_shash *fallback_tfm = NULL;
-	struct crypto_shash *base_hash = NULL;
-	int err = -ENOMEM;
-
-	ctx->op = op;
-	ctx->count_add = count_add;
-
-	/* Allocate a fallback and abort if it failed. */
-	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
-					  CRYPTO_ALG_NEED_FALLBACK);
-	if (IS_ERR(fallback_tfm)) {
-		printk(KERN_WARNING MV_CESA
-		       "Fallback driver '%s' could not be loaded!\n",
-		       fallback_driver_name);
-		err = PTR_ERR(fallback_tfm);
-		goto out;
-	}
-	ctx->fallback = fallback_tfm;
-
-	if (base_hash_name) {
-		/* Allocate a hash to compute the ipad/opad of hmac. */
-		base_hash = crypto_alloc_shash(base_hash_name, 0,
-					       CRYPTO_ALG_NEED_FALLBACK);
-		if (IS_ERR(base_hash)) {
-			printk(KERN_WARNING MV_CESA
-			       "Base driver '%s' could not be loaded!\n",
-			       base_hash_name);
-			err = PTR_ERR(base_hash);
-			goto err_bad_base;
-		}
-	}
-	ctx->base_hash = base_hash;
-
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct mv_req_hash_ctx) +
-				 crypto_shash_descsize(ctx->fallback));
-	return 0;
-err_bad_base:
-	crypto_free_shash(fallback_tfm);
-out:
-	return err;
-}
-
-static void mv_cra_hash_exit(struct crypto_tfm *tfm)
-{
-	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_shash(ctx->fallback);
-	if (ctx->base_hash)
-		crypto_free_shash(ctx->base_hash);
-}
-
-static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
-{
-	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
-}
-
-static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
-{
-	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
-}
-
-static irqreturn_t crypto_int(int irq, void *priv)
-{
-	u32 val;
-
-	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
-	if (!(val & SEC_INT_ACCEL0_DONE))
-		return IRQ_NONE;
-
-	if (!del_timer(&cpg->completion_timer)) {
-		printk(KERN_WARNING MV_CESA
-		       "got an interrupt but no pending timer?\n");
-	}
-	val &= ~SEC_INT_ACCEL0_DONE;
-	writel(val, cpg->reg + FPGA_INT_STATUS);
-	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
-	BUG_ON(cpg->eng_st != ENGINE_BUSY);
-	cpg->eng_st = ENGINE_W_DEQUEUE;
-	wake_up_process(cpg->queue_th);
-	return IRQ_HANDLED;
-}
-
-static struct crypto_alg mv_aes_alg_ecb = {
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "mv-ecb-aes",
-	.cra_priority	= 300,
-	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
-			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
-	.cra_blocksize	= 16,
-	.cra_ctxsize	= sizeof(struct mv_ctx),
-	.cra_alignmask	= 0,
-	.cra_type	= &crypto_ablkcipher_type,
-	.cra_module	= THIS_MODULE,
-	.cra_init	= mv_cra_init,
-	.cra_u		= {
-		.ablkcipher = {
-			.min_keysize	=	AES_MIN_KEY_SIZE,
-			.max_keysize	=	AES_MAX_KEY_SIZE,
-			.setkey		=	mv_setkey_aes,
-			.encrypt	=	mv_enc_aes_ecb,
-			.decrypt	=	mv_dec_aes_ecb,
-		},
-	},
-};
-
-static struct crypto_alg mv_aes_alg_cbc = {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "mv-cbc-aes",
-	.cra_priority	= 300,
-	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
-			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
-	.cra_blocksize	= AES_BLOCK_SIZE,
-	.cra_ctxsize	= sizeof(struct mv_ctx),
-	.cra_alignmask	= 0,
-	.cra_type	= &crypto_ablkcipher_type,
-	.cra_module	= THIS_MODULE,
-	.cra_init	= mv_cra_init,
-	.cra_u		= {
-		.ablkcipher = {
-			.ivsize		=	AES_BLOCK_SIZE,
-			.min_keysize	=	AES_MIN_KEY_SIZE,
-			.max_keysize	=	AES_MAX_KEY_SIZE,
-			.setkey		=	mv_setkey_aes,
-			.encrypt	=	mv_enc_aes_cbc,
-			.decrypt	=	mv_dec_aes_cbc,
-		},
-	},
-};
-
-static struct ahash_alg mv_sha1_alg = {
-	.init = mv_hash_init,
-	.update = mv_hash_update,
-	.final = mv_hash_final,
-	.finup = mv_hash_finup,
-	.digest = mv_hash_digest,
-	.halg = {
-		 .digestsize = SHA1_DIGEST_SIZE,
-		 .base = {
-			  .cra_name = "sha1",
-			  .cra_driver_name = "mv-sha1",
-			  .cra_priority = 300,
-			  .cra_flags =
-			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-			  CRYPTO_ALG_NEED_FALLBACK,
-			  .cra_blocksize = SHA1_BLOCK_SIZE,
-			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
-			  .cra_init = mv_cra_hash_sha1_init,
-			  .cra_exit = mv_cra_hash_exit,
-			  .cra_module = THIS_MODULE,
-			  }
-		 }
-};
-
-static struct ahash_alg mv_hmac_sha1_alg = {
-	.init = mv_hash_init,
-	.update = mv_hash_update,
-	.final = mv_hash_final,
-	.finup = mv_hash_finup,
-	.digest = mv_hash_digest,
-	.setkey = mv_hash_setkey,
-	.halg = {
-		 .digestsize = SHA1_DIGEST_SIZE,
-		 .base = {
-			  .cra_name = "hmac(sha1)",
-			  .cra_driver_name = "mv-hmac-sha1",
-			  .cra_priority = 300,
-			  .cra_flags =
-			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-			  CRYPTO_ALG_NEED_FALLBACK,
-			  .cra_blocksize = SHA1_BLOCK_SIZE,
-			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
-			  .cra_init = mv_cra_hash_hmac_sha1_init,
-			  .cra_exit = mv_cra_hash_exit,
-			  .cra_module = THIS_MODULE,
-			  }
-		 }
-};
-
-static int mv_cesa_get_sram(struct platform_device *pdev,
-			    struct crypto_priv *cp)
-{
-	struct resource *res;
-	u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE;
-
-	of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size",
-			     &sram_size);
-
-	cp->sram_size = sram_size;
-	cp->sram_pool = of_gen_pool_get(pdev->dev.of_node,
-					"marvell,crypto-srams", 0);
-	if (cp->sram_pool) {
-		cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
-					      &cp->sram_dma);
-		if (cp->sram)
-			return 0;
-
-		return -ENOMEM;
-	}
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					   "sram");
-	if (!res || resource_size(res) < cp->sram_size)
-		return -EINVAL;
-
-	cp->sram = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(cp->sram))
-		return PTR_ERR(cp->sram);
-
-	return 0;
-}
-
-static int mv_probe(struct platform_device *pdev)
-{
-	struct crypto_priv *cp;
-	struct resource *res;
-	int irq;
-	int ret;
-
-	if (cpg) {
-		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
-		return -EEXIST;
-	}
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-	if (!res)
-		return -ENXIO;
-
-	cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
-	if (!cp)
-		return -ENOMEM;
-
-	spin_lock_init(&cp->lock);
-	crypto_init_queue(&cp->queue, 50);
-	cp->reg = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(cp->reg)) {
-		ret = PTR_ERR(cp->reg);
-		goto err;
-	}
-
-	ret = mv_cesa_get_sram(pdev, cp);
-	if (ret)
-		goto err;
-
-	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		ret = irq;
-		goto err;
-	}
-	cp->irq = irq;
-
-	platform_set_drvdata(pdev, cp);
-	cpg = cp;
-
-	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
-	if (IS_ERR(cp->queue_th)) {
-		ret = PTR_ERR(cp->queue_th);
-		goto err;
-	}
-
-	ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
-			cp);
-	if (ret)
-		goto err_thread;
-
-	/* Not all platforms can gate the clock, so it is not
-	   an error if the clock does not exists. */
-	cp->clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(cp->clk))
-		clk_prepare_enable(cp->clk);
-
-	writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
-	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
-	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
-	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
-
-	ret = crypto_register_alg(&mv_aes_alg_ecb);
-	if (ret) {
-		printk(KERN_WARNING MV_CESA
-		       "Could not register aes-ecb driver\n");
-		goto err_irq;
-	}
-
-	ret = crypto_register_alg(&mv_aes_alg_cbc);
-	if (ret) {
-		printk(KERN_WARNING MV_CESA
-		       "Could not register aes-cbc driver\n");
-		goto err_unreg_ecb;
-	}
-
-	ret = crypto_register_ahash(&mv_sha1_alg);
-	if (ret == 0)
-		cpg->has_sha1 = 1;
-	else
-		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
-
-	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
-	if (ret == 0) {
-		cpg->has_hmac_sha1 = 1;
-	} else {
-		printk(KERN_WARNING MV_CESA
-		       "Could not register hmac-sha1 driver\n");
-	}
-
-	return 0;
-err_unreg_ecb:
-	crypto_unregister_alg(&mv_aes_alg_ecb);
-err_irq:
-	free_irq(irq, cp);
-	if (!IS_ERR(cp->clk)) {
-		clk_disable_unprepare(cp->clk);
-		clk_put(cp->clk);
-	}
-err_thread:
-	kthread_stop(cp->queue_th);
-err:
-	cpg = NULL;
-	return ret;
-}
-
-static int mv_remove(struct platform_device *pdev)
-{
-	struct crypto_priv *cp = platform_get_drvdata(pdev);
-
-	crypto_unregister_alg(&mv_aes_alg_ecb);
-	crypto_unregister_alg(&mv_aes_alg_cbc);
-	if (cp->has_sha1)
-		crypto_unregister_ahash(&mv_sha1_alg);
-	if (cp->has_hmac_sha1)
-		crypto_unregister_ahash(&mv_hmac_sha1_alg);
-	kthread_stop(cp->queue_th);
-	free_irq(cp->irq, cp);
-	memset(cp->sram, 0, cp->sram_size);
-
-	if (!IS_ERR(cp->clk)) {
-		clk_disable_unprepare(cp->clk);
-		clk_put(cp->clk);
-	}
-
-	cpg = NULL;
-	return 0;
-}
-
-static const struct of_device_id mv_cesa_of_match_table[] = {
-	{ .compatible = "marvell,orion-crypto", },
-	{ .compatible = "marvell,kirkwood-crypto", },
-	{ .compatible = "marvell,dove-crypto", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
-
-static struct platform_driver marvell_crypto = {
-	.probe		= mv_probe,
-	.remove		= mv_remove,
-	.driver		= {
-		.name	= "mv_crypto",
-		.of_match_table = mv_cesa_of_match_table,
-	},
-};
-MODULE_ALIAS("platform:mv_crypto");
-
-module_platform_driver(marvell_crypto);
-
-MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
-MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
-MODULE_LICENSE("GPL");

+ 0 - 151
drivers/crypto/mv_cesa.h

@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MV_CRYPTO_H__
-#define __MV_CRYPTO_H__
-
-#define DIGEST_INITIAL_VAL_A	0xdd00
-#define DIGEST_INITIAL_VAL_B	0xdd04
-#define DIGEST_INITIAL_VAL_C	0xdd08
-#define DIGEST_INITIAL_VAL_D	0xdd0c
-#define DIGEST_INITIAL_VAL_E	0xdd10
-#define DES_CMD_REG		0xdd58
-
-#define SEC_ACCEL_CMD		0xde00
-#define SEC_CMD_EN_SEC_ACCL0	(1 << 0)
-#define SEC_CMD_EN_SEC_ACCL1	(1 << 1)
-#define SEC_CMD_DISABLE_SEC	(1 << 2)
-
-#define SEC_ACCEL_DESC_P0	0xde04
-#define SEC_DESC_P0_PTR(x)	(x)
-
-#define SEC_ACCEL_DESC_P1	0xde14
-#define SEC_DESC_P1_PTR(x)	(x)
-
-#define SEC_ACCEL_CFG		0xde08
-#define SEC_CFG_STOP_DIG_ERR	(1 << 0)
-#define SEC_CFG_CH0_W_IDMA	(1 << 7)
-#define SEC_CFG_CH1_W_IDMA	(1 << 8)
-#define SEC_CFG_ACT_CH0_IDMA	(1 << 9)
-#define SEC_CFG_ACT_CH1_IDMA	(1 << 10)
-
-#define SEC_ACCEL_STATUS	0xde0c
-#define SEC_ST_ACT_0		(1 << 0)
-#define SEC_ST_ACT_1		(1 << 1)
-
-/*
- * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata
- * 4.12. It looks like that it was part of an IRQ-controller in FPGA and
- * someone forgot to remove  it while switching to the core and moving to
- * SEC_ACCEL_INT_STATUS.
- */
-#define FPGA_INT_STATUS		0xdd68
-#define SEC_ACCEL_INT_STATUS	0xde20
-#define SEC_INT_AUTH_DONE	(1 << 0)
-#define SEC_INT_DES_E_DONE	(1 << 1)
-#define SEC_INT_AES_E_DONE	(1 << 2)
-#define SEC_INT_AES_D_DONE	(1 << 3)
-#define SEC_INT_ENC_DONE	(1 << 4)
-#define SEC_INT_ACCEL0_DONE	(1 << 5)
-#define SEC_INT_ACCEL1_DONE	(1 << 6)
-#define SEC_INT_ACC0_IDMA_DONE	(1 << 7)
-#define SEC_INT_ACC1_IDMA_DONE	(1 << 8)
-
-#define SEC_ACCEL_INT_MASK	0xde24
-
-#define AES_KEY_LEN	(8 * 4)
-
-struct sec_accel_config {
-
-	u32 config;
-#define CFG_OP_MAC_ONLY		0
-#define CFG_OP_CRYPT_ONLY	1
-#define CFG_OP_MAC_CRYPT	2
-#define CFG_OP_CRYPT_MAC	3
-#define CFG_MACM_MD5		(4 << 4)
-#define CFG_MACM_SHA1		(5 << 4)
-#define CFG_MACM_HMAC_MD5	(6 << 4)
-#define CFG_MACM_HMAC_SHA1	(7 << 4)
-#define CFG_ENCM_DES		(1 << 8)
-#define CFG_ENCM_3DES		(2 << 8)
-#define CFG_ENCM_AES		(3 << 8)
-#define CFG_DIR_ENC		(0 << 12)
-#define CFG_DIR_DEC		(1 << 12)
-#define CFG_ENC_MODE_ECB	(0 << 16)
-#define CFG_ENC_MODE_CBC	(1 << 16)
-#define CFG_3DES_EEE		(0 << 20)
-#define CFG_3DES_EDE		(1 << 20)
-#define CFG_AES_LEN_128		(0 << 24)
-#define CFG_AES_LEN_192		(1 << 24)
-#define CFG_AES_LEN_256		(2 << 24)
-#define CFG_NOT_FRAG		(0 << 30)
-#define CFG_FIRST_FRAG		(1 << 30)
-#define CFG_LAST_FRAG		(2 << 30)
-#define CFG_MID_FRAG		(3 << 30)
-
-	u32 enc_p;
-#define ENC_P_SRC(x)		(x)
-#define ENC_P_DST(x)		((x) << 16)
-
-	u32 enc_len;
-#define ENC_LEN(x)		(x)
-
-	u32 enc_key_p;
-#define ENC_KEY_P(x)		(x)
-
-	u32 enc_iv;
-#define ENC_IV_POINT(x)		((x) << 0)
-#define ENC_IV_BUF_POINT(x)	((x) << 16)
-
-	u32 mac_src_p;
-#define MAC_SRC_DATA_P(x)	(x)
-#define MAC_SRC_TOTAL_LEN(x)	((x) << 16)
-
-	u32 mac_digest;
-#define MAC_DIGEST_P(x)	(x)
-#define MAC_FRAG_LEN(x)	((x) << 16)
-	u32 mac_iv;
-#define MAC_INNER_IV_P(x)	(x)
-#define MAC_OUTER_IV_P(x)	((x) << 16)
-}__attribute__ ((packed));
-	/*
-	 * /-----------\ 0
-	 * | ACCEL CFG |	4 * 8
-	 * |-----------| 0x20
-	 * | CRYPT KEY |	8 * 4
-	 * |-----------| 0x40
-	 * |  IV   IN  |	4 * 4
-	 * |-----------| 0x40 (inplace)
-	 * |  IV BUF   |	4 * 4
-	 * |-----------| 0x80
-	 * |  DATA IN  |	16 * x (max ->max_req_size)
-	 * |-----------| 0x80 (inplace operation)
-	 * |  DATA OUT |	16 * x (max ->max_req_size)
-	 * \-----------/ SRAM size
-	 */
-
-	/* Hashing memory map:
-	 * /-----------\ 0
-	 * | ACCEL CFG |        4 * 8
-	 * |-----------| 0x20
-	 * | Inner IV  |        5 * 4
-	 * |-----------| 0x34
-	 * | Outer IV  |        5 * 4
-	 * |-----------| 0x48
-	 * | Output BUF|        5 * 4
-	 * |-----------| 0x80
-	 * |  DATA IN  |        64 * x (max ->max_req_size)
-	 * \-----------/ SRAM size
-	 */
-#define SRAM_CONFIG		0x00
-#define SRAM_DATA_KEY_P		0x20
-#define SRAM_DATA_IV		0x40
-#define SRAM_DATA_IV_BUF	0x40
-#define SRAM_DATA_IN_START	0x80
-#define SRAM_DATA_OUT_START	0x80
-
-#define SRAM_HMAC_IV_IN		0x20
-#define SRAM_HMAC_IV_OUT	0x34
-#define SRAM_DIGEST_BUF		0x48
-
-#define SRAM_CFG_SPACE		0x80
-
-#endif

+ 4 - 8
drivers/crypto/n2_core.c

@@ -1962,10 +1962,8 @@ static struct n2_crypto *alloc_n2cp(void)
 
 static void free_n2cp(struct n2_crypto *np)
 {
-	if (np->cwq_info.ino_table) {
-		kfree(np->cwq_info.ino_table);
-		np->cwq_info.ino_table = NULL;
-	}
+	kfree(np->cwq_info.ino_table);
+	np->cwq_info.ino_table = NULL;
 
 	kfree(np);
 }
@@ -2079,10 +2077,8 @@ static struct n2_mau *alloc_ncp(void)
 
 static void free_ncp(struct n2_mau *mp)
 {
-	if (mp->mau_info.ino_table) {
-		kfree(mp->mau_info.ino_table);
-		mp->mau_info.ino_table = NULL;
-	}
+	kfree(mp->mau_info.ino_table);
+	mp->mau_info.ino_table = NULL;
 
 	kfree(mp);
 }
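kfree() is documented to be a no-op when passed NULL, so the surrounding check was pure noise; the general rule (ptr standing for any kmalloc'd pointer):

	kfree(ptr);	/* safe even when ptr == NULL */
	ptr = NULL;	/* optional: avoids leaving a dangling pointer */

Only the redundant test is dropped in both helpers here; the NULL re-assignment is kept as it was.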

+ 1 - 1
drivers/crypto/nx/nx-842-pseries.c

@@ -1082,7 +1082,7 @@ static int nx842_remove(struct vio_dev *viodev)
 	return 0;
 }
 
-static struct vio_device_id nx842_vio_driver_ids[] = {
+static const struct vio_device_id nx842_vio_driver_ids[] = {
 	{"ibm,compression-v1", "ibm,compression"},
 	{"", ""},
 };

+ 5 - 4
drivers/crypto/nx/nx-aes-gcm.c

@@ -22,6 +22,7 @@
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
+#include <crypto/gcm.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -433,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	char *iv = rctx->iv;
 
-	memcpy(iv, req->iv, 12);
+	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
 
 	return gcm_aes_nx_crypt(req, 1, req->assoclen);
 }
@@ -443,7 +444,7 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	char *iv = rctx->iv;
 
-	memcpy(iv, req->iv, 12);
+	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
 
 	return gcm_aes_nx_crypt(req, 0, req->assoclen);
 }
@@ -498,7 +499,7 @@ struct aead_alg nx_gcm_aes_alg = {
 	},
 	.init        = nx_crypto_ctx_aes_gcm_init,
 	.exit        = nx_crypto_ctx_aead_exit,
-	.ivsize      = 12,
+	.ivsize      = GCM_AES_IV_SIZE,
 	.maxauthsize = AES_BLOCK_SIZE,
 	.setkey      = gcm_aes_nx_set_key,
 	.encrypt     = gcm_aes_nx_encrypt,
@@ -516,7 +517,7 @@ struct aead_alg nx_gcm4106_aes_alg = {
 	},
 	.init        = nx_crypto_ctx_aes_gcm_init,
 	.exit        = nx_crypto_ctx_aead_exit,
-	.ivsize      = 8,
+	.ivsize      = GCM_RFC4106_IV_SIZE,
 	.maxauthsize = AES_BLOCK_SIZE,
 	.setkey      = gcm4106_aes_nx_set_key,
 	.setauthsize = gcm4106_aes_nx_setauthsize,
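The bare 12 and 8 become named constants. include/crypto/gcm.h, introduced in this series, defines (from my reading of the header):

	#define GCM_AES_IV_SIZE		12
	#define GCM_RFC4106_IV_SIZE	8
	#define GCM_RFC4543_IV_SIZE	8

so every memcpy() length and .ivsize above keeps its exact previous value; the change is purely for readability and greppability.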

+ 1 - 1
drivers/crypto/nx/nx.c

@@ -833,7 +833,7 @@ static void __exit nx_fini(void)
 	vio_unregister_driver(&nx_driver.viodriver);
 }
 
-static struct vio_device_id nx_crypto_driver_ids[] = {
+static const struct vio_device_id nx_crypto_driver_ids[] = {
 	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
 	{ "", "" }
 };

+ 6 - 5
drivers/crypto/omap-aes-gcm.c

@@ -18,6 +18,7 @@
 #include <linux/omap-dma.h>
 #include <linux/interrupt.h>
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/skcipher.h>
 #include <crypto/internal/aead.h>
@@ -186,7 +187,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
 	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
 	if (!sk_req) {
 		pr_err("skcipher: Failed to allocate request\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	init_completion(&result.completion);
@@ -214,7 +215,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
 		}
 		/* fall through */
 	default:
-		pr_err("Encryption of IV failed for GCM mode");
+		pr_err("Encryption of IV failed for GCM mode\n");
 		break;
 	}
 
@@ -311,7 +312,7 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
 	int err, assoclen;
 
 	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
-	memcpy(rctx->iv + 12, &counter, 4);
+	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);
 
 	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
 	if (err)
@@ -339,7 +340,7 @@ int omap_aes_gcm_encrypt(struct aead_request *req)
 {
 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
 
-	memcpy(rctx->iv, req->iv, 12);
+	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
 }
 
@@ -347,7 +348,7 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
 {
 	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
 
-	memcpy(rctx->iv, req->iv, 12);
+	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	return omap_aes_gcm_crypt(req, FLAGS_GCM);
 }
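Two small fixes ride along in this file: do_encrypt_iv() returned a bare -1 on allocation failure, which callers would decode as -EPERM ("Operation not permitted"); -ENOMEM is the honest errno:

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req)
		return -ENOMEM;	/* was -1, i.e. -EPERM to the caller */

and the pr_err() string gains its missing trailing "\n" so the message is not concatenated with the next printk.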
 

+ 5 - 7
drivers/crypto/omap-aes.c

@@ -35,6 +35,7 @@
 #include <linux/interrupt.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/aes.h>
+#include <crypto/gcm.h>
 #include <crypto/engine.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/aead.h>
@@ -767,7 +768,7 @@ static struct aead_alg algs_aead_gcm[] = {
 	},
 	.init		= omap_aes_gcm_cra_init,
 	.exit		= omap_aes_gcm_cra_exit,
-	.ivsize		= 12,
+	.ivsize		= GCM_AES_IV_SIZE,
 	.maxauthsize	= AES_BLOCK_SIZE,
 	.setkey		= omap_aes_gcm_setkey,
 	.encrypt	= omap_aes_gcm_encrypt,
@@ -788,7 +789,7 @@ static struct aead_alg algs_aead_gcm[] = {
 	.init		= omap_aes_gcm_cra_init,
 	.exit		= omap_aes_gcm_cra_exit,
 	.maxauthsize	= AES_BLOCK_SIZE,
-	.ivsize		= 8,
+	.ivsize		= GCM_RFC4106_IV_SIZE,
 	.setkey		= omap_aes_4106gcm_setkey,
 	.encrypt	= omap_aes_4106gcm_encrypt,
 	.decrypt	= omap_aes_4106gcm_decrypt,
@@ -974,11 +975,10 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
 		struct device *dev, struct resource *res)
 {
 	struct device_node *node = dev->of_node;
-	const struct of_device_id *match;
 	int err = 0;
 
-	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
-	if (!match) {
+	dd->pdata = of_device_get_match_data(dev);
+	if (!dd->pdata) {
 		dev_err(dev, "no compatible OF match\n");
 		err = -EINVAL;
 		goto err;
@@ -991,8 +991,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
 		goto err;
 	}
 
-	dd->pdata = match->data;
-
 err:
 	return err;
 }

+ 2 - 5
drivers/crypto/omap-des.c

@@ -928,16 +928,13 @@ MODULE_DEVICE_TABLE(of, omap_des_of_match);
 static int omap_des_get_of(struct omap_des_dev *dd,
 		struct platform_device *pdev)
 {
-	const struct of_device_id *match;
 
-	match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev);
-	if (!match) {
+	dd->pdata = of_device_get_match_data(&pdev->dev);
+	if (!dd->pdata) {
 		dev_err(&pdev->dev, "no compatible OF match\n");
 		return -EINVAL;
 	}
 
-	dd->pdata = match->data;
-
 	return 0;
 }
 #else

+ 2 - 5
drivers/crypto/omap-sham.c

@@ -1944,11 +1944,10 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
 		struct device *dev, struct resource *res)
 {
 	struct device_node *node = dev->of_node;
-	const struct of_device_id *match;
 	int err = 0;
 
-	match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
-	if (!match) {
+	dd->pdata = of_device_get_match_data(dev);
+	if (!dd->pdata) {
 		dev_err(dev, "no compatible OF match\n");
 		err = -EINVAL;
 		goto err;
@@ -1968,8 +1967,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
 		goto err;
 	}
 
-	dd->pdata = match->data;
-
 err:
 	return err;
 }
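The same transformation repeats across omap-aes, omap-des and omap-sham: of_device_get_match_data() folds the of_match_device() lookup and the ->data dereference into one call, eliminating the local match variable. A generic probe-side sketch, with the demo_* names hypothetical:

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		const struct demo_pdata *pdata;

		pdata = of_device_get_match_data(&pdev->dev);
		if (!pdata)
			return -EINVAL;	/* no compatible OF match */

		/* ... use pdata ... */
		return 0;
	}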

+ 1 - 1
drivers/crypto/padlock-aes.c

@@ -482,7 +482,7 @@ static struct crypto_alg cbc_aes_alg = {
 	}
 };
 
-static struct x86_cpu_id padlock_cpu_id[] = {
+static const struct x86_cpu_id padlock_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
 	{}
 };

+ 1 - 1
drivers/crypto/padlock-sha.c

@@ -509,7 +509,7 @@ static struct shash_alg sha256_alg_nano = {
 	}
 };
 
-static struct x86_cpu_id padlock_sha_ids[] = {
+static const struct x86_cpu_id padlock_sha_ids[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_PHE),
 	{}
 };
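These ID tables (vio_device_id in the nx drivers above, x86_cpu_id in the two padlock drivers) are only ever read by the matching core, so constifying them lets them live in .rodata. The pattern, with a hypothetical table name:

	static const struct x86_cpu_id demo_cpu_ids[] = {
		X86_FEATURE_MATCH(X86_FEATURE_AES),
		{}
	};
	MODULE_DEVICE_TABLE(x86cpu, demo_cpu_ids);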

+ 0 - 3
drivers/crypto/qat/qat_common/adf_dev_mgr.c

@@ -228,11 +228,8 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
 		list_add_tail(&map->list, &vfs_table);
 	} else if (accel_dev->is_vf && pf) {
 		/* VF on host */
-		struct adf_accel_vf_info *vf_info;
 		struct vf_id_map *map;
 
-		vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
-
 		map = adf_find_vf(adf_get_vf_num(accel_dev));
 		if (map) {
 			struct vf_id_map *next;

+ 8 - 10
drivers/crypto/qat/qat_common/qat_asym_algs.c

@@ -443,9 +443,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct device *dev = &GET_DEV(inst->accel_dev);
 
-	if (unlikely(!params->p || !params->g))
-		return -EINVAL;
-
 	if (qat_dh_check_params_length(params->p_size << 3))
 		return -EINVAL;
 
@@ -462,11 +459,8 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 	}
 
 	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
-	if (!ctx->g) {
-		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
-		ctx->p = NULL;
+	if (!ctx->g)
 		return -ENOMEM;
-	}
 	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
 	       params->g_size);
 
@@ -507,18 +501,22 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
 
 	ret = qat_dh_set_params(ctx, &params);
 	if (ret < 0)
-		return ret;
+		goto err_clear_ctx;
 
 	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
 				      GFP_KERNEL);
 	if (!ctx->xa) {
-		qat_dh_clear_ctx(dev, ctx);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_clear_ctx;
 	}
 	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
 	       params.key_size);
 
 	return 0;
+
+err_clear_ctx:
+	qat_dh_clear_ctx(dev, ctx);
+	return ret;
 }
 
 static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
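qat_dh_set_params() no longer unwinds its own partial allocations; instead every failure in qat_dh_set_secret() funnels through the single err_clear_ctx label, with qat_dh_clear_ctx() expected to tolerate a half-built context. The idiom in generic form, all demo_* names hypothetical:

	static int demo_set_secret(struct demo_ctx *ctx)
	{
		int ret;

		ret = demo_alloc_p(ctx);
		if (ret)
			goto err_clear_ctx;

		ret = demo_alloc_xa(ctx);
		if (ret)
			goto err_clear_ctx;

		return 0;

	err_clear_ctx:
		demo_clear_ctx(ctx);	/* must cope with partial state */
		return ret;
	}

This removes the duplicated dma_free_coherent() unwinding and makes it much harder to leak, or double-free, ctx->p when a later step fails.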

+ 9 - 6
drivers/crypto/qat/qat_common/qat_uclo.c

@@ -567,26 +567,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
 		       code_page->imp_expr_tab_offset);
 	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
 	    imp_expr_tab->entry_num) {
-		pr_err("QAT: UOF can't contain imported variable to be parsed");
+		pr_err("QAT: UOF can't contain imported variable to be parsed\n");
 		return -EINVAL;
 	}
 	neigh_reg_tab = (struct icp_qat_uof_objtable *)
 			(encap_uof_obj->beg_uof +
 			code_page->neigh_reg_tab_offset);
 	if (neigh_reg_tab->entry_num) {
-		pr_err("QAT: UOF can't contain shared control store feature");
+		pr_err("QAT: UOF can't contain shared control store feature\n");
 		return -EINVAL;
 	}
 	if (image->numpages > 1) {
-		pr_err("QAT: UOF can't contain multiple pages");
+		pr_err("QAT: UOF can't contain multiple pages\n");
 		return -EINVAL;
 	}
 	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
-		pr_err("QAT: UOF can't use shared control store feature");
+		pr_err("QAT: UOF can't use shared control store feature\n");
 		return -EFAULT;
 	}
 	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
-		pr_err("QAT: UOF can't use reloadable feature");
+		pr_err("QAT: UOF can't use reloadable feature\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -702,7 +702,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
 		}
 	}
 	if (!mflag) {
-		pr_err("QAT: uimage uses AE not set");
+		pr_err("QAT: uimage uses AE not set\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -791,6 +791,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
 	case ICP_GPA_ABS:
 	case ICP_GPB_ABS:
 		ctx_mask = 0;
+		/* fall through */
 	case ICP_GPA_REL:
 	case ICP_GPB_REL:
 		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
@@ -800,6 +801,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
 	case ICP_SR_RD_ABS:
 	case ICP_DR_RD_ABS:
 		ctx_mask = 0;
+		/* fall through */
 	case ICP_SR_REL:
 	case ICP_DR_REL:
 	case ICP_SR_RD_REL:
@@ -809,6 +811,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
 	case ICP_SR_WR_ABS:
 	case ICP_DR_WR_ABS:
 		ctx_mask = 0;
+		/* fall through */
 	case ICP_SR_WR_REL:
 	case ICP_DR_WR_REL:
 		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
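The /* fall through */ comments mark the ctx_mask = 0 assignments as deliberate drops into the following case labels, which is the comment form GCC's -Wimplicit-fallthrough recognizes. In shape (labels and callee hypothetical):

	switch (reg_type) {
	case DEMO_REG_ABS:
		ctx_mask = 0;	/* absolute registers ignore the context mask */
		/* fall through */
	case DEMO_REG_REL:
		return demo_init_reg(handle, ae, ctx_mask, reg_type);
	}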
