
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.3:

  API:

   - the AEAD interface transition is now complete.
   - add top-level skcipher interface.

  Drivers:

   - x86-64 acceleration for chacha20/poly1305.
   - add sunxi-ss Allwinner Security System crypto accelerator.
   - add RSA algorithm to qat driver.
   - add SRIOV support to qat driver.
   - add LS1021A support to caam.
   - add i.MX6 support to caam"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (163 commits)
  crypto: algif_aead - fix for multiple operations on AF_ALG sockets
  crypto: qat - enable legacy VFs
  MPI: Fix mpi_read_buffer
  crypto: qat - silence a static checker warning
  crypto: vmx - Fixing opcode issue
  crypto: caam - Use the preferred style for memory allocations
  crypto: caam - Propagate the real error code in caam_probe
  crypto: caam - Fix the error handling in caam_probe
  crypto: caam - fix writing to JQCR_MS when using service interface
  crypto: hash - Add AHASH_REQUEST_ON_STACK
  crypto: testmgr - Use new skcipher interface
  crypto: skcipher - Add top-level skcipher interface
  crypto: cmac - allow usage in FIPS mode
  crypto: sahara - Use dmam_alloc_coherent
  crypto: caam - Add support for LS1021A
  crypto: qat - Don't move data inside output buffer
  crypto: vmx - Fixing GHASH Key issue on little endian
  crypto: vmx - Fixing AES-CTR counter bug
  crypto: null - Add missing Kconfig tristate for NULL2
  crypto: nx - Add forward declaration for struct crypto_aead
  ...
Linus Torvalds 10 years ago
parent
commit
d4c90396ed
100 changed files with 10811 additions and 6320 deletions
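A note on the headline API item: with the AEAD interface transition complete, associated data lives in-line at the front of req->src (and req->dst), and callers declare its length with aead_request_set_ad(). A minimal sketch of the resulting calling convention (assuming a synchronous algorithm, eliding declarations and error handling; "gcm(aes)" is only an illustrative algorithm name):

	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

	crypto_aead_setkey(tfm, key, keylen);
	crypto_aead_setauthsize(tfm, 16);
	/* sg covers assoclen bytes of AD followed by the plaintext;
	 * on encrypt the tag is appended after the ciphertext in dst. */
	aead_request_set_crypt(req, sg, sg, ptlen, iv);
	aead_request_set_ad(req, assoclen);
	err = crypto_aead_encrypt(req);	/* may return -EINPROGRESS if async */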
  1. 4 4
      Documentation/DocBook/crypto-API.tmpl
  2. 17 0
      Documentation/devicetree/bindings/crypto/fsl-sec4.txt
  3. 23 0
      Documentation/devicetree/bindings/crypto/sun4i-ss.txt
  4. 21 3
      MAINTAINERS
  5. 25 4
      arch/arm/boot/dts/imx6qdl.dtsi
  6. 27 0
      arch/arm/boot/dts/imx6sx.dtsi
  7. 8 0
      arch/arm/boot/dts/sun4i-a10.dtsi
  8. 18 0
      arch/arm/boot/dts/sun6i-a31.dtsi
  9. 8 0
      arch/arm/boot/dts/sun7i-a20.dtsi
  10. 1 2
      arch/arm/configs/imx_v6_v7_defconfig
  11. 2 0
      arch/arm/crypto/.gitignore
  12. 42 26
      arch/arm64/crypto/aes-ce-ccm-glue.c
  13. 1 0
      arch/powerpc/include/asm/switch_to.h
  14. 0 3
      arch/powerpc/kernel/process.c
  15. 6 0
      arch/x86/crypto/Makefile
  16. 18 35
      arch/x86/crypto/aesni-intel_glue.c
  17. 443 0
      arch/x86/crypto/chacha20-avx2-x86_64.S
  18. 625 0
      arch/x86/crypto/chacha20-ssse3-x86_64.S
  19. 150 0
      arch/x86/crypto/chacha20_glue.c
  20. 386 0
      arch/x86/crypto/poly1305-avx2-x86_64.S
  21. 582 0
      arch/x86/crypto/poly1305-sse2-x86_64.S
  22. 207 0
      arch/x86/crypto/poly1305_glue.c
  23. 37 3
      crypto/Kconfig
  24. 2 1
      crypto/Makefile
  25. 57 578
      crypto/aead.c
  26. 14 11
      crypto/algapi.c
  27. 5 7
      crypto/algboss.c
  28. 2 2
      crypto/algif_aead.c
  29. 180 400
      crypto/authenc.c
  30. 219 497
      crypto/authencesn.c
  31. 218 162
      crypto/ccm.c
  32. 12 16
      crypto/chacha20_generic.c
  33. 126 90
      crypto/chacha20poly1305.c
  34. 13 10
      crypto/cryptd.c
  35. 0 32
      crypto/crypto_user.c
  36. 12 74
      crypto/echainiv.c
  37. 68 34
      crypto/gcm.c
  38. 1 1
      crypto/jitterentropy-kcapi.c
  39. 7 0
      crypto/pcrypt.c
  40. 35 38
      crypto/poly1305_generic.c
  41. 25 1
      crypto/rsa.c
  42. 2 2
      crypto/rsa_helper.c
  43. 9 436
      crypto/seqiv.c
  44. 245 0
      crypto/skcipher.c
  45. 59 23
      crypto/tcrypt.c
  46. 20 0
      crypto/tcrypt.h
  47. 34 29
      crypto/testmgr.c
  48. 2293 655
      crypto/testmgr.h
  49. 3 0
      drivers/clk/imx/clk-imx6q.c
  50. 17 0
      drivers/crypto/Kconfig
  51. 1 0
      drivers/crypto/Makefile
  52. 1 1
      drivers/crypto/amcc/crypto4xx_core.c
  53. 9 1
      drivers/crypto/caam/Kconfig
  54. 1516 1361
      drivers/crypto/caam/caamalg.c
  55. 45 24
      drivers/crypto/caam/caamhash.c
  56. 21 5
      drivers/crypto/caam/caamrng.c
  57. 1 0
      drivers/crypto/caam/compat.h
  58. 128 26
      drivers/crypto/caam/ctrl.c
  59. 20 3
      drivers/crypto/caam/desc.h
  60. 1 1
      drivers/crypto/caam/desc_constr.h
  61. 5 0
      drivers/crypto/caam/intern.h
  62. 23 7
      drivers/crypto/caam/jr.c
  63. 57 7
      drivers/crypto/caam/regs.h
  64. 17 8
      drivers/crypto/caam/sg_sw_sec4.h
  65. 2 0
      drivers/crypto/ccp/ccp-platform.c
  66. 1 1
      drivers/crypto/img-hash.c
  67. 157 155
      drivers/crypto/ixp4xx_crypto.c
  68. 0 1
      drivers/crypto/marvell/cesa.c
  69. 5 12
      drivers/crypto/nx/Kconfig
  70. 2 6
      drivers/crypto/nx/Makefile
  71. 0 580
      drivers/crypto/nx/nx-842-crypto.c
  72. 0 84
      drivers/crypto/nx/nx-842-platform.c
  73. 29 13
      drivers/crypto/nx/nx-842-powernv.c
  74. 71 68
      drivers/crypto/nx/nx-842-pseries.c
  75. 491 63
      drivers/crypto/nx/nx-842.c
  76. 53 12
      drivers/crypto/nx/nx-842.h
  77. 70 81
      drivers/crypto/nx/nx-aes-ccm.c
  78. 0 21
      drivers/crypto/nx/nx-aes-ctr.c
  79. 38 26
      drivers/crypto/nx/nx-aes-gcm.c
  80. 11 19
      drivers/crypto/nx/nx.c
  81. 5 4
      drivers/crypto/nx/nx.h
  82. 41 45
      drivers/crypto/omap-aes.c
  83. 310 367
      drivers/crypto/picoxcell_crypto.c
  84. 15 0
      drivers/crypto/qat/Kconfig
  85. 1 0
      drivers/crypto/qat/Makefile
  86. 1 0
      drivers/crypto/qat/qat_common/.gitignore
  87. 8 0
      drivers/crypto/qat/qat_common/Makefile
  88. 43 3
      drivers/crypto/qat/qat_common/adf_accel_devices.h
  89. 37 5
      drivers/crypto/qat/qat_common/adf_accel_engine.c
  90. 290 0
      drivers/crypto/qat/qat_common/adf_admin.c
  91. 4 1
      drivers/crypto/qat/qat_common/adf_aer.c
  92. 6 3
      drivers/crypto/qat/qat_common/adf_cfg.c
  93. 2 1
      drivers/crypto/qat/qat_common/adf_cfg_common.h
  94. 48 5
      drivers/crypto/qat/qat_common/adf_common_drv.h
  95. 3 3
      drivers/crypto/qat/qat_common/adf_ctl_drv.c
  96. 268 18
      drivers/crypto/qat/qat_common/adf_dev_mgr.c
  97. 23 14
      drivers/crypto/qat/qat_common/adf_hw_arbiter.c
  98. 18 86
      drivers/crypto/qat/qat_common/adf_init.c
  99. 438 0
      drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
  100. 146 0
      drivers/crypto/qat/qat_common/adf_pf2vf_msg.h

+ 4 - 4
Documentation/DocBook/crypto-API.tmpl

@@ -585,7 +585,7 @@ kernel crypto API                                |   IPSEC Layer
 +-----------+                                    |
 |           |            (1)
 |   aead    | <-----------------------------------  esp_output
-| (seqniv)  | ---+
+|  (seqiv)  | ---+
 +-----------+    |
                  | (2)
 +-----------+    |
@@ -1101,7 +1101,7 @@ kernel crypto API            |       Caller
     </para>
 
     <para>
-     [1] http://www.chronox.de/libkcapi.html
+     [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
     </para>
 
    </sect1>
@@ -1661,7 +1661,7 @@ read(opfd, out, outlen);
     </para>
 
     <para>
-     [1] http://www.chronox.de/libkcapi.html
+     [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
     </para>
 
    </sect1>
@@ -1687,7 +1687,7 @@ read(opfd, out, outlen);
 !Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
 !Finclude/linux/crypto.h crypto_alg
 !Finclude/linux/crypto.h ablkcipher_alg
-!Finclude/linux/crypto.h aead_alg
+!Finclude/crypto/aead.h aead_alg
 !Finclude/linux/crypto.h blkcipher_alg
 !Finclude/linux/crypto.h cipher_alg
 !Finclude/crypto/rng.h rng_alg

+ 17 - 0
Documentation/devicetree/bindings/crypto/fsl-sec4.txt

@@ -106,6 +106,18 @@ PROPERTIES
           to the interrupt parent to which the child domain
           is being mapped.
 
+   - clocks
+      Usage: required if SEC 4.0 requires explicit enablement of clocks
+      Value type: <prop_encoded-array>
+      Definition:  A list of phandle and clock specifier pairs describing
+          the clocks required for enabling and disabling SEC 4.0.
+
+   - clock-names
+      Usage: required if SEC 4.0 requires explicit enablement of clocks
+      Value type: <string>
+      Definition: A list of clock name strings in the same order as the
+          clocks property.
+
    Note: All other standard properties (see the ePAPR) are allowed
    but are optional.
 
@@ -120,6 +132,11 @@ EXAMPLE
 		ranges = <0 0x300000 0x10000>;
 		interrupt-parent = <&mpic>;
 		interrupts = <92 2>;
+		clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
+			 <&clks IMX6QDL_CLK_CAAM_ACLK>,
+			 <&clks IMX6QDL_CLK_CAAM_IPG>,
+			 <&clks IMX6QDL_CLK_EIM_SLOW>;
+		clock-names = "mem", "aclk", "ipg", "emi_slow";
 	};
 
 =====================================================================

+ 23 - 0
Documentation/devicetree/bindings/crypto/sun4i-ss.txt

@@ -0,0 +1,23 @@
+* Allwinner Security System found on A20 SoC
+
+Required properties:
+- compatible : Should be "allwinner,sun4i-a10-crypto".
+- reg: Should contain the Security System register location and length.
+- interrupts: Should contain the IRQ line for the Security System.
+- clocks : List of clock specifiers, corresponding to ahb and ss.
+- clock-names : Name of the functional clock, should be
+	* "ahb" : AHB gating clock
+	* "mod" : SS controller clock
+
+Optional properties:
+ - resets : phandle + reset specifier pair
+ - reset-names : must contain "ahb"
+
+Example:
+	crypto: crypto-engine@01c15000 {
+		compatible = "allwinner,sun4i-a10-crypto";
+		reg = <0x01c15000 0x1000>;
+		interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&ahb_gates 5>, <&ss_clk>;
+		clock-names = "ahb", "mod";
+	};

+ 21 - 3
MAINTAINERS

@@ -556,6 +556,12 @@ S:	Maintained
 F:	Documentation/i2c/busses/i2c-ali1563
 F:	drivers/i2c/busses/i2c-ali1563.c
 
+ALLWINNER SECURITY SYSTEM
+M:	Corentin Labbe <clabbe.montjoie@gmail.com>
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	drivers/crypto/sunxi-ss/
+
 ALPHA PORT
 M:	Richard Henderson <rth@twiddle.net>
 M:	Ivan Kokshaysky <ink@jurassic.park.msu.ru>
@@ -5078,9 +5084,21 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
 S:	Maintained
 F:	arch/ia64/
 
+IBM Power VMX Cryptographic instructions
+M:	Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+M:	Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+L:	linux-crypto@vger.kernel.org
+S:	Supported
+F:	drivers/crypto/vmx/Makefile
+F:	drivers/crypto/vmx/Kconfig
+F:	drivers/crypto/vmx/vmx.c
+F:	drivers/crypto/vmx/aes*
+F:	drivers/crypto/vmx/ghash*
+F:	drivers/crypto/vmx/ppc-xlate.pl
+
 IBM Power in-Nest Crypto Acceleration
-M:	Marcelo Henrique Cerri <mhcerri@linux.vnet.ibm.com>
-M:	Fionnuala Gunter <fin@linux.vnet.ibm.com>
+M:	Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+M:	Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
 F:	drivers/crypto/nx/Makefile
@@ -5092,7 +5110,7 @@ F:	drivers/crypto/nx/nx_csbcpb.h
 F:	drivers/crypto/nx/nx_debugfs.h
 
 IBM Power 842 compression accelerator
-M:	Dan Streetman <ddstreet@us.ibm.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 S:	Supported
 F:	drivers/crypto/nx/Makefile
 F:	drivers/crypto/nx/Kconfig

+ 25 - 4
arch/arm/boot/dts/imx6qdl.dtsi

@@ -836,10 +836,31 @@
 			reg = <0x02100000 0x100000>;
 			ranges;
 
-			caam@02100000 {
-				reg = <0x02100000 0x40000>;
-				interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>,
-					     <0 106 IRQ_TYPE_LEVEL_HIGH>;
+			crypto: caam@2100000 {
+				compatible = "fsl,sec-v4.0";
+				fsl,sec-era = <4>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x2100000 0x10000>;
+				ranges = <0 0x2100000 0x10000>;
+				interrupt-parent = <&intc>;
+				clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
+					 <&clks IMX6QDL_CLK_CAAM_ACLK>,
+					 <&clks IMX6QDL_CLK_CAAM_IPG>,
+					 <&clks IMX6QDL_CLK_EIM_SLOW>;
+				clock-names = "mem", "aclk", "ipg", "emi_slow";
+
+				sec_jr0: jr0@1000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x1000 0x1000>;
+					interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr1: jr1@2000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x2000 0x1000>;
+					interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+				};
 			};
 
 			aipstz@0217c000 { /* AIPSTZ2 */

+ 27 - 0
arch/arm/boot/dts/imx6sx.dtsi

@@ -738,6 +738,33 @@
 			reg = <0x02100000 0x100000>;
 			ranges;
 
+			crypto: caam@2100000 {
+				compatible = "fsl,sec-v4.0";
+				fsl,sec-era = <4>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x2100000 0x10000>;
+				ranges = <0 0x2100000 0x10000>;
+				interrupt-parent = <&intc>;
+				clocks = <&clks IMX6SX_CLK_CAAM_MEM>,
+					 <&clks IMX6SX_CLK_CAAM_ACLK>,
+					 <&clks IMX6SX_CLK_CAAM_IPG>,
+					 <&clks IMX6SX_CLK_EIM_SLOW>;
+				clock-names = "mem", "aclk", "ipg", "emi_slow";
+
+				sec_jr0: jr0@1000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x1000 0x1000>;
+					interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr1: jr1@2000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x2000 0x1000>;
+					interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+				};
+			};
+
 			usbotg1: usb@02184000 {
 				compatible = "fsl,imx6sx-usb", "fsl,imx27-usb";
 				reg = <0x02184000 0x200>;

+ 8 - 0
arch/arm/boot/dts/sun4i-a10.dtsi

@@ -678,6 +678,14 @@
 			status = "disabled";
 		};
 
+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <86>;
+			clocks = <&ahb_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+		};
+
 		spi2: spi@01c17000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c17000 0x1000>;

+ 18 - 0
arch/arm/boot/dts/sun6i-a31.dtsi

@@ -367,6 +367,14 @@
 					     "mmc3_sample";
 		};
 
+		ss_clk: clk@01c2009c {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-mod0-clk";
+			reg = <0x01c2009c 0x4>;
+			clocks = <&osc24M>, <&pll6 0>;
+			clock-output-names = "ss";
+		};
+
 		spi0_clk: clk@01c200a0 {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun4i-a10-mod0-clk";
@@ -894,6 +902,16 @@
 			#size-cells = <0>;
 		};
 
+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ahb1_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+			resets = <&ahb1_rst 5>;
+			reset-names = "ahb";
+		};
+
 		timer@01c60000 {
 			compatible = "allwinner,sun6i-a31-hstimer",
 				     "allwinner,sun7i-a20-hstimer";

+ 8 - 0
arch/arm/boot/dts/sun7i-a20.dtsi

@@ -754,6 +754,14 @@
 			status = "disabled";
 		};
 
+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ahb_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+		};
+
 		spi2: spi@01c17000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c17000 0x1000>;

+ 1 - 2
arch/arm/configs/imx_v6_v7_defconfig

@@ -354,8 +354,7 @@ CONFIG_PROVE_LOCKING=y
 # CONFIG_FTRACE is not set
 # CONFIG_ARM_UNWIND is not set
 CONFIG_SECURITYFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC_T10DIF=y
 CONFIG_CRC7=m

+ 2 - 0
arch/arm/crypto/.gitignore

@@ -1 +1,3 @@
 aesbs-core.S
+sha256-core.S
+sha512-core.S

+ 42 - 26
arch/arm64/crypto/aes-ce-ccm-glue.c

@@ -124,7 +124,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
 
 	ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
 			     num_rounds(ctx));
-	scatterwalk_start(&walk, req->assoc);
+	scatterwalk_start(&walk, req->src);
 
 	do {
 		u32 n = scatterwalk_clamp(&walk, len);
@@ -151,6 +151,10 @@ static int ccm_encrypt(struct aead_request *req)
 	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
 	struct blkcipher_desc desc = { .info = req->iv };
 	struct blkcipher_walk walk;
+	struct scatterlist srcbuf[2];
+	struct scatterlist dstbuf[2];
+	struct scatterlist *src;
+	struct scatterlist *dst;
 	u8 __aligned(8) mac[AES_BLOCK_SIZE];
 	u8 buf[AES_BLOCK_SIZE];
 	u32 len = req->cryptlen;
@@ -168,7 +172,12 @@ static int ccm_encrypt(struct aead_request *req)
 	/* preserve the original iv for the final round */
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-	blkcipher_walk_init(&walk, req->dst, req->src, len);
+	src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst)
+		dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
+
+	blkcipher_walk_init(&walk, dst, src, len);
 	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
 					     AES_BLOCK_SIZE);
 
@@ -194,7 +203,7 @@ static int ccm_encrypt(struct aead_request *req)
 		return err;
 
 	/* copy authtag to end of dst */
-	scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
+	scatterwalk_map_and_copy(mac, dst, req->cryptlen,
 				 crypto_aead_authsize(aead), 1);
 
 	return 0;
@@ -207,6 +216,10 @@ static int ccm_decrypt(struct aead_request *req)
 	unsigned int authsize = crypto_aead_authsize(aead);
 	struct blkcipher_desc desc = { .info = req->iv };
 	struct blkcipher_walk walk;
+	struct scatterlist srcbuf[2];
+	struct scatterlist dstbuf[2];
+	struct scatterlist *src;
+	struct scatterlist *dst;
 	u8 __aligned(8) mac[AES_BLOCK_SIZE];
 	u8 buf[AES_BLOCK_SIZE];
 	u32 len = req->cryptlen - authsize;
@@ -224,7 +237,12 @@ static int ccm_decrypt(struct aead_request *req)
 	/* preserve the original iv for the final round */
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-	blkcipher_walk_init(&walk, req->dst, req->src, len);
+	src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst)
+		dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
+
+	blkcipher_walk_init(&walk, dst, src, len);
 	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
 					     AES_BLOCK_SIZE);
 
@@ -250,44 +268,42 @@ static int ccm_decrypt(struct aead_request *req)
 		return err;
 
 	/* compare calculated auth tag with the stored one */
-	scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
+	scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize,
 				 authsize, 0);
 
-	if (memcmp(mac, buf, authsize))
+	if (crypto_memneq(mac, buf, authsize))
 		return -EBADMSG;
 	return 0;
 }
 
-static struct crypto_alg ccm_aes_alg = {
-	.cra_name		= "ccm(aes)",
-	.cra_driver_name	= "ccm-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_aead_type,
-	.cra_module		= THIS_MODULE,
-	.cra_aead = {
-		.ivsize		= AES_BLOCK_SIZE,
-		.maxauthsize	= AES_BLOCK_SIZE,
-		.setkey		= ccm_setkey,
-		.setauthsize	= ccm_setauthsize,
-		.encrypt	= ccm_encrypt,
-		.decrypt	= ccm_decrypt,
-	}
+static struct aead_alg ccm_aes_alg = {
+	.base = {
+		.cra_name		= "ccm(aes)",
+		.cra_driver_name	= "ccm-aes-ce",
+		.cra_priority		= 300,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
+	},
+	.ivsize		= AES_BLOCK_SIZE,
+	.maxauthsize	= AES_BLOCK_SIZE,
+	.setkey		= ccm_setkey,
+	.setauthsize	= ccm_setauthsize,
+	.encrypt	= ccm_encrypt,
+	.decrypt	= ccm_decrypt,
 };
 
 static int __init aes_mod_init(void)
 {
 	if (!(elf_hwcap & HWCAP_AES))
 		return -ENODEV;
-	return crypto_register_alg(&ccm_aes_alg);
+	return crypto_register_aead(&ccm_aes_alg);
 }
 
 static void __exit aes_mod_exit(void)
{
-	crypto_unregister_alg(&ccm_aes_alg);
+	crypto_unregister_aead(&ccm_aes_alg);
 }
 
 module_init(aes_mod_init);
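The scatterwalk_ffwd() calls added above are the mechanical consequence of the new AEAD layout: the associated data sits at the head of req->src/req->dst, so the cipher walk has to start req->assoclen bytes in. A sketch of the call (the two-entry array is scratch space the helper may use when the skip lands mid-entry):

	struct scatterlist buf[2];
	struct scatterlist *payload;

	/* scatterlist view of req->src beginning right after the AD */
	payload = scatterwalk_ffwd(buf, req->src, req->assoclen);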

+ 1 - 0
arch/powerpc/include/asm/switch_to.h

@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}
 
 extern void enable_kernel_fp(void);
 extern void enable_kernel_altivec(void);
+extern void enable_kernel_vsx(void);
 extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);

+ 0 - 3
arch/powerpc/kernel/process.c

@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-#if 0
-/* not currently used, but some crazy RAID module might want to later */
 void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
@@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
-#endif
 
 void giveup_vsx(struct task_struct *tsk)
 {
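Un-hiding enable_kernel_vsx() (it had been compiled out under #if 0) gives kernel code a supported way to enable VSX; the likely consumer in this pull is the drivers/crypto/vmx code touched elsewhere in the series, which runs vector-scalar instructions in kernel mode.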

+ 6 - 0
arch/x86/crypto/Makefile

@@ -20,6 +20,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
 obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
 obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
+obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
@@ -60,6 +62,7 @@ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
 twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
 salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o
 serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
 
 ifeq ($(avx_supported),yes)
@@ -75,6 +78,7 @@ endif
 
 ifeq ($(avx2_supported),yes)
 	camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o
+	chacha20-x86_64-y += chacha20-avx2-x86_64.o
 	serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
 endif
 
@@ -82,8 +86,10 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
 aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
+poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o
 ifeq ($(avx2_supported),yes)
 sha1-ssse3-y += sha1_avx2_x86_64_asm.o
+poly1305-x86_64-y += poly1305-avx2-x86_64.o
 endif
 crc32c-intel-y := crc32c-intel_glue.o
 crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o

+ 18 - 35
arch/x86/crypto/aesni-intel_glue.c

@@ -803,10 +803,7 @@ static int rfc4106_init(struct crypto_aead *aead)
 		return PTR_ERR(cryptd_tfm);
 
 	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(
-		aead,
-		sizeof(struct aead_request) +
-		crypto_aead_reqsize(&cryptd_tfm->base));
+	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
 	return 0;
 }
 
@@ -955,8 +952,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 
 	/* Assuming we are supporting rfc4106 64-bit extended */
 	/* sequence numbers We need to have the AAD length equal */
-	/* to 8 or 12 bytes */
-	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+	/* to 16 or 20 bytes */
+	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 		return -EINVAL;
 
 	/* IV below built */
@@ -992,9 +989,9 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 	}
 
 	kernel_fpu_begin();
-	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
-		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
-		+ ((unsigned long)req->cryptlen), auth_tag_len);
+	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
+			  ctx->hash_subkey, assoc, req->assoclen - 8,
+			  dst + req->cryptlen, auth_tag_len);
 	kernel_fpu_end();
 
 	/* The authTag (aka the Integrity Check Value) needs to be written
@@ -1033,12 +1030,12 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 	struct scatter_walk dst_sg_walk;
 	unsigned int i;
 
-	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 		return -EINVAL;
 
 	/* Assuming we are supporting rfc4106 64-bit extended */
 	/* sequence numbers We need to have the AAD length */
-	/* equal to 8 or 12 bytes */
+	/* equal to 16 or 20 bytes */
 
 	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
 	/* IV below built */
@@ -1075,8 +1072,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 
 	kernel_fpu_begin();
 	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
-		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
-		authTag, auth_tag_len);
+			  ctx->hash_subkey, assoc, req->assoclen - 8,
+			  authTag, auth_tag_len);
 	kernel_fpu_end();
 
 	/* Compare generated tag with passed in tag. */
@@ -1105,19 +1102,12 @@ static int rfc4106_encrypt(struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
 	struct cryptd_aead *cryptd_tfm = *ctx;
-	struct aead_request *subreq = aead_request_ctx(req);
 
-	aead_request_set_tfm(subreq, irq_fpu_usable() ?
-				     cryptd_aead_child(cryptd_tfm) :
-				     &cryptd_tfm->base);
+	aead_request_set_tfm(req, irq_fpu_usable() ?
+				  cryptd_aead_child(cryptd_tfm) :
+				  &cryptd_tfm->base);
 
-	aead_request_set_callback(subreq, req->base.flags,
-				  req->base.complete, req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst,
-			       req->cryptlen, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
-
-	return crypto_aead_encrypt(subreq);
+	return crypto_aead_encrypt(req);
 }
 
 static int rfc4106_decrypt(struct aead_request *req)
@@ -1125,19 +1115,12 @@ static int rfc4106_decrypt(struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
 	struct cryptd_aead *cryptd_tfm = *ctx;
-	struct aead_request *subreq = aead_request_ctx(req);
-
-	aead_request_set_tfm(subreq, irq_fpu_usable() ?
-				     cryptd_aead_child(cryptd_tfm) :
-				     &cryptd_tfm->base);
 
-	aead_request_set_callback(subreq, req->base.flags,
-				  req->base.complete, req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst,
-			       req->cryptlen, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
+	aead_request_set_tfm(req, irq_fpu_usable() ?
+				  cryptd_aead_child(cryptd_tfm) :
+				  &cryptd_tfm->base);
 
-	return crypto_aead_decrypt(subreq);
+	return crypto_aead_decrypt(req);
 }
 #endif
 

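A reading note on the assoclen checks above: under the converted AEAD interface, rfc4106's req->assoclen covers the 8-byte explicit IV in addition to the 8- or 12-byte AAD proper (hence 16 or 20), which is also why req->assoclen - 8 is what gets handed down to the raw GCM routines.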
+ 443 - 0
arch/x86/crypto/chacha20-avx2-x86_64.S

@@ -0,0 +1,443 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 32
+
+ROT8:	.octa 0x0e0d0c0f0a09080b0605040702010003
+	.octa 0x0e0d0c0f0a09080b0605040702010003
+ROT16:	.octa 0x0d0c0f0e09080b0a0504070601000302
+	.octa 0x0d0c0f0e09080b0a0504070601000302
+CTRINC:	.octa 0x00000003000000020000000100000000
+	.octa 0x00000007000000060000000500000004
+
+.text
+
+ENTRY(chacha20_8block_xor_avx2)
+	# %rdi: Input state matrix, s
+	# %rsi: 8 data blocks output, o
+	# %rdx: 8 data blocks input, i
+
+	# This function encrypts eight consecutive ChaCha20 blocks by loading
+	# the state matrix in AVX registers eight times. As we need some
+	# scratch registers, we save the first four registers on the stack. The
+	# algorithm performs each operation on the corresponding word of each
+	# state matrix, hence requires no word shuffling. For final XORing step
+	# we transpose the matrix by interleaving 32-, 64- and then 128-bit
+	# words, which allows us to do XOR in AVX registers. 8/16-bit word
+	# rotation is done with the slightly better performing byte shuffling,
+	# 7/12-bit word rotation uses traditional shift+OR.
+
+	vzeroupper
+	# 4 * 32 byte stack, 32-byte aligned
+	mov		%rsp, %r8
+	and		$~31, %rsp
+	sub		$0x80, %rsp
+
+	# x0..15[0-7] = s[0..15]
+	vpbroadcastd	0x00(%rdi),%ymm0
+	vpbroadcastd	0x04(%rdi),%ymm1
+	vpbroadcastd	0x08(%rdi),%ymm2
+	vpbroadcastd	0x0c(%rdi),%ymm3
+	vpbroadcastd	0x10(%rdi),%ymm4
+	vpbroadcastd	0x14(%rdi),%ymm5
+	vpbroadcastd	0x18(%rdi),%ymm6
+	vpbroadcastd	0x1c(%rdi),%ymm7
+	vpbroadcastd	0x20(%rdi),%ymm8
+	vpbroadcastd	0x24(%rdi),%ymm9
+	vpbroadcastd	0x28(%rdi),%ymm10
+	vpbroadcastd	0x2c(%rdi),%ymm11
+	vpbroadcastd	0x30(%rdi),%ymm12
+	vpbroadcastd	0x34(%rdi),%ymm13
+	vpbroadcastd	0x38(%rdi),%ymm14
+	vpbroadcastd	0x3c(%rdi),%ymm15
+	# x0..3 on stack
+	vmovdqa		%ymm0,0x00(%rsp)
+	vmovdqa		%ymm1,0x20(%rsp)
+	vmovdqa		%ymm2,0x40(%rsp)
+	vmovdqa		%ymm3,0x60(%rsp)
+
+	vmovdqa		CTRINC(%rip),%ymm1
+	vmovdqa		ROT8(%rip),%ymm2
+	vmovdqa		ROT16(%rip),%ymm3
+
+	# x12 += counter values 0-3
+	vpaddd		%ymm1,%ymm12,%ymm12
+
+	mov		$10,%ecx
+
+.Ldoubleround8:
+	# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+	vpaddd		0x00(%rsp),%ymm4,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpxor		%ymm0,%ymm12,%ymm12
+	vpshufb		%ymm3,%ymm12,%ymm12
+	# x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+	vpaddd		0x20(%rsp),%ymm5,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpxor		%ymm0,%ymm13,%ymm13
+	vpshufb		%ymm3,%ymm13,%ymm13
+	# x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+	vpaddd		0x40(%rsp),%ymm6,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpxor		%ymm0,%ymm14,%ymm14
+	vpshufb		%ymm3,%ymm14,%ymm14
+	# x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+	vpaddd		0x60(%rsp),%ymm7,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpxor		%ymm0,%ymm15,%ymm15
+	vpshufb		%ymm3,%ymm15,%ymm15
+
+	# x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+	vpaddd		%ymm12,%ymm8,%ymm8
+	vpxor		%ymm8,%ymm4,%ymm4
+	vpslld		$12,%ymm4,%ymm0
+	vpsrld		$20,%ymm4,%ymm4
+	vpor		%ymm0,%ymm4,%ymm4
+	# x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+	vpaddd		%ymm13,%ymm9,%ymm9
+	vpxor		%ymm9,%ymm5,%ymm5
+	vpslld		$12,%ymm5,%ymm0
+	vpsrld		$20,%ymm5,%ymm5
+	vpor		%ymm0,%ymm5,%ymm5
+	# x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+	vpaddd		%ymm14,%ymm10,%ymm10
+	vpxor		%ymm10,%ymm6,%ymm6
+	vpslld		$12,%ymm6,%ymm0
+	vpsrld		$20,%ymm6,%ymm6
+	vpor		%ymm0,%ymm6,%ymm6
+	# x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+	vpaddd		%ymm15,%ymm11,%ymm11
+	vpxor		%ymm11,%ymm7,%ymm7
+	vpslld		$12,%ymm7,%ymm0
+	vpsrld		$20,%ymm7,%ymm7
+	vpor		%ymm0,%ymm7,%ymm7
+
+	# x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+	vpaddd		0x00(%rsp),%ymm4,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpxor		%ymm0,%ymm12,%ymm12
+	vpshufb		%ymm2,%ymm12,%ymm12
+	# x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+	vpaddd		0x20(%rsp),%ymm5,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpxor		%ymm0,%ymm13,%ymm13
+	vpshufb		%ymm2,%ymm13,%ymm13
+	# x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+	vpaddd		0x40(%rsp),%ymm6,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpxor		%ymm0,%ymm14,%ymm14
+	vpshufb		%ymm2,%ymm14,%ymm14
+	# x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+	vpaddd		0x60(%rsp),%ymm7,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpxor		%ymm0,%ymm15,%ymm15
+	vpshufb		%ymm2,%ymm15,%ymm15
+
+	# x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+	vpaddd		%ymm12,%ymm8,%ymm8
+	vpxor		%ymm8,%ymm4,%ymm4
+	vpslld		$7,%ymm4,%ymm0
+	vpsrld		$25,%ymm4,%ymm4
+	vpor		%ymm0,%ymm4,%ymm4
+	# x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+	vpaddd		%ymm13,%ymm9,%ymm9
+	vpxor		%ymm9,%ymm5,%ymm5
+	vpslld		$7,%ymm5,%ymm0
+	vpsrld		$25,%ymm5,%ymm5
+	vpor		%ymm0,%ymm5,%ymm5
+	# x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+	vpaddd		%ymm14,%ymm10,%ymm10
+	vpxor		%ymm10,%ymm6,%ymm6
+	vpslld		$7,%ymm6,%ymm0
+	vpsrld		$25,%ymm6,%ymm6
+	vpor		%ymm0,%ymm6,%ymm6
+	# x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+	vpaddd		%ymm15,%ymm11,%ymm11
+	vpxor		%ymm11,%ymm7,%ymm7
+	vpslld		$7,%ymm7,%ymm0
+	vpsrld		$25,%ymm7,%ymm7
+	vpor		%ymm0,%ymm7,%ymm7
+
+	# x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+	vpaddd		0x00(%rsp),%ymm5,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpxor		%ymm0,%ymm15,%ymm15
+	vpshufb		%ymm3,%ymm15,%ymm15
+	# x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+	vpaddd		0x20(%rsp),%ymm6,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpxor		%ymm0,%ymm12,%ymm12
+	vpshufb		%ymm3,%ymm12,%ymm12
+	# x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+	vpaddd		0x40(%rsp),%ymm7,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpxor		%ymm0,%ymm13,%ymm13
+	vpshufb		%ymm3,%ymm13,%ymm13
+	# x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+	vpaddd		0x60(%rsp),%ymm4,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpxor		%ymm0,%ymm14,%ymm14
+	vpshufb		%ymm3,%ymm14,%ymm14
+
+	# x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+	vpaddd		%ymm15,%ymm10,%ymm10
+	vpxor		%ymm10,%ymm5,%ymm5
+	vpslld		$12,%ymm5,%ymm0
+	vpsrld		$20,%ymm5,%ymm5
+	vpor		%ymm0,%ymm5,%ymm5
+	# x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+	vpaddd		%ymm12,%ymm11,%ymm11
+	vpxor		%ymm11,%ymm6,%ymm6
+	vpslld		$12,%ymm6,%ymm0
+	vpsrld		$20,%ymm6,%ymm6
+	vpor		%ymm0,%ymm6,%ymm6
+	# x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+	vpaddd		%ymm13,%ymm8,%ymm8
+	vpxor		%ymm8,%ymm7,%ymm7
+	vpslld		$12,%ymm7,%ymm0
+	vpsrld		$20,%ymm7,%ymm7
+	vpor		%ymm0,%ymm7,%ymm7
+	# x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+	vpaddd		%ymm14,%ymm9,%ymm9
+	vpxor		%ymm9,%ymm4,%ymm4
+	vpslld		$12,%ymm4,%ymm0
+	vpsrld		$20,%ymm4,%ymm4
+	vpor		%ymm0,%ymm4,%ymm4
+
+	# x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+	vpaddd		0x00(%rsp),%ymm5,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpxor		%ymm0,%ymm15,%ymm15
+	vpshufb		%ymm2,%ymm15,%ymm15
+	# x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+	vpaddd		0x20(%rsp),%ymm6,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpxor		%ymm0,%ymm12,%ymm12
+	vpshufb		%ymm2,%ymm12,%ymm12
+	# x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+	vpaddd		0x40(%rsp),%ymm7,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpxor		%ymm0,%ymm13,%ymm13
+	vpshufb		%ymm2,%ymm13,%ymm13
+	# x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+	vpaddd		0x60(%rsp),%ymm4,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpxor		%ymm0,%ymm14,%ymm14
+	vpshufb		%ymm2,%ymm14,%ymm14
+
+	# x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+	vpaddd		%ymm15,%ymm10,%ymm10
+	vpxor		%ymm10,%ymm5,%ymm5
+	vpslld		$7,%ymm5,%ymm0
+	vpsrld		$25,%ymm5,%ymm5
+	vpor		%ymm0,%ymm5,%ymm5
+	# x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+	vpaddd		%ymm12,%ymm11,%ymm11
+	vpxor		%ymm11,%ymm6,%ymm6
+	vpslld		$7,%ymm6,%ymm0
+	vpsrld		$25,%ymm6,%ymm6
+	vpor		%ymm0,%ymm6,%ymm6
+	# x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+	vpaddd		%ymm13,%ymm8,%ymm8
+	vpxor		%ymm8,%ymm7,%ymm7
+	vpslld		$7,%ymm7,%ymm0
+	vpsrld		$25,%ymm7,%ymm7
+	vpor		%ymm0,%ymm7,%ymm7
+	# x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+	vpaddd		%ymm14,%ymm9,%ymm9
+	vpxor		%ymm9,%ymm4,%ymm4
+	vpslld		$7,%ymm4,%ymm0
+	vpsrld		$25,%ymm4,%ymm4
+	vpor		%ymm0,%ymm4,%ymm4
+
+	dec		%ecx
+	jnz		.Ldoubleround8
+
+	# x0..15[0-3] += s[0..15]
+	vpbroadcastd	0x00(%rdi),%ymm0
+	vpaddd		0x00(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpbroadcastd	0x04(%rdi),%ymm0
+	vpaddd		0x20(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpbroadcastd	0x08(%rdi),%ymm0
+	vpaddd		0x40(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpbroadcastd	0x0c(%rdi),%ymm0
+	vpaddd		0x60(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpbroadcastd	0x10(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm4,%ymm4
+	vpbroadcastd	0x14(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm5,%ymm5
+	vpbroadcastd	0x18(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm6,%ymm6
+	vpbroadcastd	0x1c(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm7,%ymm7
+	vpbroadcastd	0x20(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm8,%ymm8
+	vpbroadcastd	0x24(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm9,%ymm9
+	vpbroadcastd	0x28(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm10,%ymm10
+	vpbroadcastd	0x2c(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm11,%ymm11
+	vpbroadcastd	0x30(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm12,%ymm12
+	vpbroadcastd	0x34(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm13,%ymm13
+	vpbroadcastd	0x38(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm14,%ymm14
+	vpbroadcastd	0x3c(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm15,%ymm15
+
+	# x12 += counter values 0-3
+	vpaddd		%ymm1,%ymm12,%ymm12
+
+	# interleave 32-bit words in state n, n+1
+	vmovdqa		0x00(%rsp),%ymm0
+	vmovdqa		0x20(%rsp),%ymm1
+	vpunpckldq	%ymm1,%ymm0,%ymm2
+	vpunpckhdq	%ymm1,%ymm0,%ymm1
+	vmovdqa		%ymm2,0x00(%rsp)
+	vmovdqa		%ymm1,0x20(%rsp)
+	vmovdqa		0x40(%rsp),%ymm0
+	vmovdqa		0x60(%rsp),%ymm1
+	vpunpckldq	%ymm1,%ymm0,%ymm2
+	vpunpckhdq	%ymm1,%ymm0,%ymm1
+	vmovdqa		%ymm2,0x40(%rsp)
+	vmovdqa		%ymm1,0x60(%rsp)
+	vmovdqa		%ymm4,%ymm0
+	vpunpckldq	%ymm5,%ymm0,%ymm4
+	vpunpckhdq	%ymm5,%ymm0,%ymm5
+	vmovdqa		%ymm6,%ymm0
+	vpunpckldq	%ymm7,%ymm0,%ymm6
+	vpunpckhdq	%ymm7,%ymm0,%ymm7
+	vmovdqa		%ymm8,%ymm0
+	vpunpckldq	%ymm9,%ymm0,%ymm8
+	vpunpckhdq	%ymm9,%ymm0,%ymm9
+	vmovdqa		%ymm10,%ymm0
+	vpunpckldq	%ymm11,%ymm0,%ymm10
+	vpunpckhdq	%ymm11,%ymm0,%ymm11
+	vmovdqa		%ymm12,%ymm0
+	vpunpckldq	%ymm13,%ymm0,%ymm12
+	vpunpckhdq	%ymm13,%ymm0,%ymm13
+	vmovdqa		%ymm14,%ymm0
+	vpunpckldq	%ymm15,%ymm0,%ymm14
+	vpunpckhdq	%ymm15,%ymm0,%ymm15
+
+	# interleave 64-bit words in state n, n+2
+	vmovdqa		0x00(%rsp),%ymm0
+	vmovdqa		0x40(%rsp),%ymm2
+	vpunpcklqdq	%ymm2,%ymm0,%ymm1
+	vpunpckhqdq	%ymm2,%ymm0,%ymm2
+	vmovdqa		%ymm1,0x00(%rsp)
+	vmovdqa		%ymm2,0x40(%rsp)
+	vmovdqa		0x20(%rsp),%ymm0
+	vmovdqa		0x60(%rsp),%ymm2
+	vpunpcklqdq	%ymm2,%ymm0,%ymm1
+	vpunpckhqdq	%ymm2,%ymm0,%ymm2
+	vmovdqa		%ymm1,0x20(%rsp)
+	vmovdqa		%ymm2,0x60(%rsp)
+	vmovdqa		%ymm4,%ymm0
+	vpunpcklqdq	%ymm6,%ymm0,%ymm4
+	vpunpckhqdq	%ymm6,%ymm0,%ymm6
+	vmovdqa		%ymm5,%ymm0
+	vpunpcklqdq	%ymm7,%ymm0,%ymm5
+	vpunpckhqdq	%ymm7,%ymm0,%ymm7
+	vmovdqa		%ymm8,%ymm0
+	vpunpcklqdq	%ymm10,%ymm0,%ymm8
+	vpunpckhqdq	%ymm10,%ymm0,%ymm10
+	vmovdqa		%ymm9,%ymm0
+	vpunpcklqdq	%ymm11,%ymm0,%ymm9
+	vpunpckhqdq	%ymm11,%ymm0,%ymm11
+	vmovdqa		%ymm12,%ymm0
+	vpunpcklqdq	%ymm14,%ymm0,%ymm12
+	vpunpckhqdq	%ymm14,%ymm0,%ymm14
+	vmovdqa		%ymm13,%ymm0
+	vpunpcklqdq	%ymm15,%ymm0,%ymm13
+	vpunpckhqdq	%ymm15,%ymm0,%ymm15
+
+	# interleave 128-bit words in state n, n+4
+	vmovdqa		0x00(%rsp),%ymm0
+	vperm2i128	$0x20,%ymm4,%ymm0,%ymm1
+	vperm2i128	$0x31,%ymm4,%ymm0,%ymm4
+	vmovdqa		%ymm1,0x00(%rsp)
+	vmovdqa		0x20(%rsp),%ymm0
+	vperm2i128	$0x20,%ymm5,%ymm0,%ymm1
+	vperm2i128	$0x31,%ymm5,%ymm0,%ymm5
+	vmovdqa		%ymm1,0x20(%rsp)
+	vmovdqa		0x40(%rsp),%ymm0
+	vperm2i128	$0x20,%ymm6,%ymm0,%ymm1
+	vperm2i128	$0x31,%ymm6,%ymm0,%ymm6
+	vmovdqa		%ymm1,0x40(%rsp)
+	vmovdqa		0x60(%rsp),%ymm0
+	vperm2i128	$0x20,%ymm7,%ymm0,%ymm1
+	vperm2i128	$0x31,%ymm7,%ymm0,%ymm7
+	vmovdqa		%ymm1,0x60(%rsp)
+	vperm2i128	$0x20,%ymm12,%ymm8,%ymm0
+	vperm2i128	$0x31,%ymm12,%ymm8,%ymm12
+	vmovdqa		%ymm0,%ymm8
+	vperm2i128	$0x20,%ymm13,%ymm9,%ymm0
+	vperm2i128	$0x31,%ymm13,%ymm9,%ymm13
+	vmovdqa		%ymm0,%ymm9
+	vperm2i128	$0x20,%ymm14,%ymm10,%ymm0
+	vperm2i128	$0x31,%ymm14,%ymm10,%ymm14
+	vmovdqa		%ymm0,%ymm10
+	vperm2i128	$0x20,%ymm15,%ymm11,%ymm0
+	vperm2i128	$0x31,%ymm15,%ymm11,%ymm15
+	vmovdqa		%ymm0,%ymm11
+
+	# xor with corresponding input, write to output
+	vmovdqa		0x00(%rsp),%ymm0
+	vpxor		0x0000(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0000(%rsi)
+	vmovdqa		0x20(%rsp),%ymm0
+	vpxor		0x0080(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0080(%rsi)
+	vmovdqa		0x40(%rsp),%ymm0
+	vpxor		0x0040(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0040(%rsi)
+	vmovdqa		0x60(%rsp),%ymm0
+	vpxor		0x00c0(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x00c0(%rsi)
+	vpxor		0x0100(%rdx),%ymm4,%ymm4
+	vmovdqu		%ymm4,0x0100(%rsi)
+	vpxor		0x0180(%rdx),%ymm5,%ymm5
+	vmovdqu		%ymm5,0x00180(%rsi)
+	vpxor		0x0140(%rdx),%ymm6,%ymm6
+	vmovdqu		%ymm6,0x0140(%rsi)
+	vpxor		0x01c0(%rdx),%ymm7,%ymm7
+	vmovdqu		%ymm7,0x01c0(%rsi)
+	vpxor		0x0020(%rdx),%ymm8,%ymm8
+	vmovdqu		%ymm8,0x0020(%rsi)
+	vpxor		0x00a0(%rdx),%ymm9,%ymm9
+	vmovdqu		%ymm9,0x00a0(%rsi)
+	vpxor		0x0060(%rdx),%ymm10,%ymm10
+	vmovdqu		%ymm10,0x0060(%rsi)
+	vpxor		0x00e0(%rdx),%ymm11,%ymm11
+	vmovdqu		%ymm11,0x00e0(%rsi)
+	vpxor		0x0120(%rdx),%ymm12,%ymm12
+	vmovdqu		%ymm12,0x0120(%rsi)
+	vpxor		0x01a0(%rdx),%ymm13,%ymm13
+	vmovdqu		%ymm13,0x01a0(%rsi)
+	vpxor		0x0160(%rdx),%ymm14,%ymm14
+	vmovdqu		%ymm14,0x0160(%rsi)
+	vpxor		0x01e0(%rdx),%ymm15,%ymm15
+	vmovdqu		%ymm15,0x01e0(%rsi)
+
+	vzeroupper
+	mov		%r8,%rsp
+	ret
+ENDPROC(chacha20_8block_xor_avx2)
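The transpose-and-XOR scheme described at the top of chacha20_8block_xor_avx2 vectorizes the standard ChaCha20 double round across eight blocks. For reference, a self-contained sketch of the scalar double round from RFC 7539 that each lane computes (x is the 16-word state; this is illustrative C, not kernel code):

	#include <stdint.h>

	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
	#define QR(a, b, c, d) do {			\
		a += b; d ^= a; d = ROTL32(d, 16);	\
		c += d; b ^= c; b = ROTL32(b, 12);	\
		a += b; d ^= a; d = ROTL32(d, 8);	\
		c += d; b ^= c; b = ROTL32(b, 7);	\
	} while (0)

	/* 20 ChaCha20 rounds = 10 double rounds over the 16-word state x */
	static void chacha20_rounds(uint32_t x[16])
	{
		for (int i = 0; i < 10; i++) {
			QR(x[0], x[4], x[8],  x[12]);	/* column quarter-rounds */
			QR(x[1], x[5], x[9],  x[13]);
			QR(x[2], x[6], x[10], x[14]);
			QR(x[3], x[7], x[11], x[15]);
			QR(x[0], x[5], x[10], x[15]);	/* diagonal quarter-rounds */
			QR(x[1], x[6], x[11], x[12]);
			QR(x[2], x[7], x[8],  x[13]);
			QR(x[3], x[4], x[9],  x[14]);
		}
	}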

+ 625 - 0
arch/x86/crypto/chacha20-ssse3-x86_64.S

@@ -0,0 +1,625 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 16
+
+ROT8:	.octa 0x0e0d0c0f0a09080b0605040702010003
+ROT16:	.octa 0x0d0c0f0e09080b0a0504070601000302
+CTRINC:	.octa 0x00000003000000020000000100000000
+
+.text
+
+ENTRY(chacha20_block_xor_ssse3)
+	# %rdi: Input state matrix, s
+	# %rsi: 1 data block output, o
+	# %rdx: 1 data block input, i
+
+	# This function encrypts one ChaCha20 block by loading the state matrix
+	# in four SSE registers. It performs matrix operation on four words in
+	# parallel, but requires shuffling to rearrange the words after each
+	# round. 8/16-bit word rotation is done with the slightly better
+	# performing SSSE3 byte shuffling, 7/12-bit word rotation uses
+	# traditional shift+OR.
+
+	# x0..3 = s0..3
+	movdqa		0x00(%rdi),%xmm0
+	movdqa		0x10(%rdi),%xmm1
+	movdqa		0x20(%rdi),%xmm2
+	movdqa		0x30(%rdi),%xmm3
+	movdqa		%xmm0,%xmm8
+	movdqa		%xmm1,%xmm9
+	movdqa		%xmm2,%xmm10
+	movdqa		%xmm3,%xmm11
+
+	movdqa		ROT8(%rip),%xmm4
+	movdqa		ROT16(%rip),%xmm5
+
+	mov	$10,%ecx
+
+.Ldoubleround:
+
+	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+	paddd		%xmm1,%xmm0
+	pxor		%xmm0,%xmm3
+	pshufb		%xmm5,%xmm3
+
+	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+	paddd		%xmm3,%xmm2
+	pxor		%xmm2,%xmm1
+	movdqa		%xmm1,%xmm6
+	pslld		$12,%xmm6
+	psrld		$20,%xmm1
+	por		%xmm6,%xmm1
+
+	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+	paddd		%xmm1,%xmm0
+	pxor		%xmm0,%xmm3
+	pshufb		%xmm4,%xmm3
+
+	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+	paddd		%xmm3,%xmm2
+	pxor		%xmm2,%xmm1
+	movdqa		%xmm1,%xmm7
+	pslld		$7,%xmm7
+	psrld		$25,%xmm1
+	por		%xmm7,%xmm1
+
+	# x1 = shuffle32(x1, MASK(0, 3, 2, 1))
+	pshufd		$0x39,%xmm1,%xmm1
+	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+	pshufd		$0x4e,%xmm2,%xmm2
+	# x3 = shuffle32(x3, MASK(2, 1, 0, 3))
+	pshufd		$0x93,%xmm3,%xmm3
+
+	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+	paddd		%xmm1,%xmm0
+	pxor		%xmm0,%xmm3
+	pshufb		%xmm5,%xmm3
+
+	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+	paddd		%xmm3,%xmm2
+	pxor		%xmm2,%xmm1
+	movdqa		%xmm1,%xmm6
+	pslld		$12,%xmm6
+	psrld		$20,%xmm1
+	por		%xmm6,%xmm1
+
+	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+	paddd		%xmm1,%xmm0
+	pxor		%xmm0,%xmm3
+	pshufb		%xmm4,%xmm3
+
+	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+	paddd		%xmm3,%xmm2
+	pxor		%xmm2,%xmm1
+	movdqa		%xmm1,%xmm7
+	pslld		$7,%xmm7
+	psrld		$25,%xmm1
+	por		%xmm7,%xmm1
+
+	# x1 = shuffle32(x1, MASK(2, 1, 0, 3))
+	pshufd		$0x93,%xmm1,%xmm1
+	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+	pshufd		$0x4e,%xmm2,%xmm2
+	# x3 = shuffle32(x3, MASK(0, 3, 2, 1))
+	pshufd		$0x39,%xmm3,%xmm3
+
+	dec		%ecx
+	jnz		.Ldoubleround
+
+	# o0 = i0 ^ (x0 + s0)
+	movdqu		0x00(%rdx),%xmm4
+	paddd		%xmm8,%xmm0
+	pxor		%xmm4,%xmm0
+	movdqu		%xmm0,0x00(%rsi)
+	# o1 = i1 ^ (x1 + s1)
+	movdqu		0x10(%rdx),%xmm5
+	paddd		%xmm9,%xmm1
+	pxor		%xmm5,%xmm1
+	movdqu		%xmm1,0x10(%rsi)
+	# o2 = i2 ^ (x2 + s2)
+	movdqu		0x20(%rdx),%xmm6
+	paddd		%xmm10,%xmm2
+	pxor		%xmm6,%xmm2
+	movdqu		%xmm2,0x20(%rsi)
+	# o3 = i3 ^ (x3 + s3)
+	movdqu		0x30(%rdx),%xmm7
+	paddd		%xmm11,%xmm3
+	pxor		%xmm7,%xmm3
+	movdqu		%xmm3,0x30(%rsi)
+
+	ret
+ENDPROC(chacha20_block_xor_ssse3)
+
+ENTRY(chacha20_4block_xor_ssse3)
+	# %rdi: Input state matrix, s
+	# %rsi: 4 data blocks output, o
+	# %rdx: 4 data blocks input, i
+
+	# This function encrypts four consecutive ChaCha20 blocks by loading
+	# the state matrix in SSE registers four times. As we need some scratch
+	# registers, we save the first four registers on the stack. The
+	# algorithm performs each operation on the corresponding word of each
+	# state matrix, hence requires no word shuffling. For final XORing step
+	# we transpose the matrix by interleaving 32- and then 64-bit words,
+	# which allows us to do XOR in SSE registers. 8/16-bit word rotation is
+	# done with the slightly better performing SSSE3 byte shuffling,
+	# 7/12-bit word rotation uses traditional shift+OR.
+
+	sub		$0x40,%rsp
+
+	# x0..15[0-3] = s0..3[0..3]
+	movq		0x00(%rdi),%xmm1
+	pshufd		$0x00,%xmm1,%xmm0
+	pshufd		$0x55,%xmm1,%xmm1
+	movq		0x08(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	movq		0x10(%rdi),%xmm5
+	pshufd		$0x00,%xmm5,%xmm4
+	pshufd		$0x55,%xmm5,%xmm5
+	movq		0x18(%rdi),%xmm7
+	pshufd		$0x00,%xmm7,%xmm6
+	pshufd		$0x55,%xmm7,%xmm7
+	movq		0x20(%rdi),%xmm9
+	pshufd		$0x00,%xmm9,%xmm8
+	pshufd		$0x55,%xmm9,%xmm9
+	movq		0x28(%rdi),%xmm11
+	pshufd		$0x00,%xmm11,%xmm10
+	pshufd		$0x55,%xmm11,%xmm11
+	movq		0x30(%rdi),%xmm13
+	pshufd		$0x00,%xmm13,%xmm12
+	pshufd		$0x55,%xmm13,%xmm13
+	movq		0x38(%rdi),%xmm15
+	pshufd		$0x00,%xmm15,%xmm14
+	pshufd		$0x55,%xmm15,%xmm15
+	# x0..3 on stack
+	movdqa		%xmm0,0x00(%rsp)
+	movdqa		%xmm1,0x10(%rsp)
+	movdqa		%xmm2,0x20(%rsp)
+	movdqa		%xmm3,0x30(%rsp)
+
+	movdqa		CTRINC(%rip),%xmm1
+	movdqa		ROT8(%rip),%xmm2
+	movdqa		ROT16(%rip),%xmm3
+
+	# x12 += counter values 0-3
+	paddd		%xmm1,%xmm12
+
+	mov		$10,%ecx
+
+.Ldoubleround4:
+	# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+	movdqa		0x00(%rsp),%xmm0
+	paddd		%xmm4,%xmm0
+	movdqa		%xmm0,0x00(%rsp)
+	pxor		%xmm0,%xmm12
+	pshufb		%xmm3,%xmm12
+	# x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+	movdqa		0x10(%rsp),%xmm0
+	paddd		%xmm5,%xmm0
+	movdqa		%xmm0,0x10(%rsp)
+	pxor		%xmm0,%xmm13
+	pshufb		%xmm3,%xmm13
+	# x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+	movdqa		0x20(%rsp),%xmm0
+	paddd		%xmm6,%xmm0
+	movdqa		%xmm0,0x20(%rsp)
+	pxor		%xmm0,%xmm14
+	pshufb		%xmm3,%xmm14
+	# x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+	movdqa		0x30(%rsp),%xmm0
+	paddd		%xmm7,%xmm0
+	movdqa		%xmm0,0x30(%rsp)
+	pxor		%xmm0,%xmm15
+	pshufb		%xmm3,%xmm15
+
+	# x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+	paddd		%xmm12,%xmm8
+	pxor		%xmm8,%xmm4
+	movdqa		%xmm4,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm4
+	por		%xmm0,%xmm4
+	# x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+	paddd		%xmm13,%xmm9
+	pxor		%xmm9,%xmm5
+	movdqa		%xmm5,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm5
+	por		%xmm0,%xmm5
+	# x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+	paddd		%xmm14,%xmm10
+	pxor		%xmm10,%xmm6
+	movdqa		%xmm6,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm6
+	por		%xmm0,%xmm6
+	# x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+	paddd		%xmm15,%xmm11
+	pxor		%xmm11,%xmm7
+	movdqa		%xmm7,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm7
+	por		%xmm0,%xmm7
+
+	# x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+	movdqa		0x00(%rsp),%xmm0
+	paddd		%xmm4,%xmm0
+	movdqa		%xmm0,0x00(%rsp)
+	pxor		%xmm0,%xmm12
+	pshufb		%xmm2,%xmm12
+	# x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+	movdqa		0x10(%rsp),%xmm0
+	paddd		%xmm5,%xmm0
+	movdqa		%xmm0,0x10(%rsp)
+	pxor		%xmm0,%xmm13
+	pshufb		%xmm2,%xmm13
+	# x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+	movdqa		0x20(%rsp),%xmm0
+	paddd		%xmm6,%xmm0
+	movdqa		%xmm0,0x20(%rsp)
+	pxor		%xmm0,%xmm14
+	pshufb		%xmm2,%xmm14
+	# x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+	movdqa		0x30(%rsp),%xmm0
+	paddd		%xmm7,%xmm0
+	movdqa		%xmm0,0x30(%rsp)
+	pxor		%xmm0,%xmm15
+	pshufb		%xmm2,%xmm15
+
+	# x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+	paddd		%xmm12,%xmm8
+	pxor		%xmm8,%xmm4
+	movdqa		%xmm4,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm4
+	por		%xmm0,%xmm4
+	# x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+	paddd		%xmm13,%xmm9
+	pxor		%xmm9,%xmm5
+	movdqa		%xmm5,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm5
+	por		%xmm0,%xmm5
+	# x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+	paddd		%xmm14,%xmm10
+	pxor		%xmm10,%xmm6
+	movdqa		%xmm6,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm6
+	por		%xmm0,%xmm6
+	# x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+	paddd		%xmm15,%xmm11
+	pxor		%xmm11,%xmm7
+	movdqa		%xmm7,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm7
+	por		%xmm0,%xmm7
+
+	# x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+	movdqa		0x00(%rsp),%xmm0
+	paddd		%xmm5,%xmm0
+	movdqa		%xmm0,0x00(%rsp)
+	pxor		%xmm0,%xmm15
+	pshufb		%xmm3,%xmm15
+	# x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+	movdqa		0x10(%rsp),%xmm0
+	paddd		%xmm6,%xmm0
+	movdqa		%xmm0,0x10(%rsp)
+	pxor		%xmm0,%xmm12
+	pshufb		%xmm3,%xmm12
+	# x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+	movdqa		0x20(%rsp),%xmm0
+	paddd		%xmm7,%xmm0
+	movdqa		%xmm0,0x20(%rsp)
+	pxor		%xmm0,%xmm13
+	pshufb		%xmm3,%xmm13
+	# x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+	movdqa		0x30(%rsp),%xmm0
+	paddd		%xmm4,%xmm0
+	movdqa		%xmm0,0x30(%rsp)
+	pxor		%xmm0,%xmm14
+	pshufb		%xmm3,%xmm14
+
+	# x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+	paddd		%xmm15,%xmm10
+	pxor		%xmm10,%xmm5
+	movdqa		%xmm5,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm5
+	por		%xmm0,%xmm5
+	# x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+	paddd		%xmm12,%xmm11
+	pxor		%xmm11,%xmm6
+	movdqa		%xmm6,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm6
+	por		%xmm0,%xmm6
+	# x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+	paddd		%xmm13,%xmm8
+	pxor		%xmm8,%xmm7
+	movdqa		%xmm7,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm7
+	por		%xmm0,%xmm7
+	# x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+	paddd		%xmm14,%xmm9
+	pxor		%xmm9,%xmm4
+	movdqa		%xmm4,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm4
+	por		%xmm0,%xmm4
+
+	# x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+	movdqa		0x00(%rsp),%xmm0
+	paddd		%xmm5,%xmm0
+	movdqa		%xmm0,0x00(%rsp)
+	pxor		%xmm0,%xmm15
+	pshufb		%xmm2,%xmm15
+	# x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+	movdqa		0x10(%rsp),%xmm0
+	paddd		%xmm6,%xmm0
+	movdqa		%xmm0,0x10(%rsp)
+	pxor		%xmm0,%xmm12
+	pshufb		%xmm2,%xmm12
+	# x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+	movdqa		0x20(%rsp),%xmm0
+	paddd		%xmm7,%xmm0
+	movdqa		%xmm0,0x20(%rsp)
+	pxor		%xmm0,%xmm13
+	pshufb		%xmm2,%xmm13
+	# x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+	movdqa		0x30(%rsp),%xmm0
+	paddd		%xmm4,%xmm0
+	movdqa		%xmm0,0x30(%rsp)
+	pxor		%xmm0,%xmm14
+	pshufb		%xmm2,%xmm14
+
+	# x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+	paddd		%xmm15,%xmm10
+	pxor		%xmm10,%xmm5
+	movdqa		%xmm5,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm5
+	por		%xmm0,%xmm5
+	# x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+	paddd		%xmm12,%xmm11
+	pxor		%xmm11,%xmm6
+	movdqa		%xmm6,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm6
+	por		%xmm0,%xmm6
+	# x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+	paddd		%xmm13,%xmm8
+	pxor		%xmm8,%xmm7
+	movdqa		%xmm7,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm7
+	por		%xmm0,%xmm7
+	# x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+	paddd		%xmm14,%xmm9
+	pxor		%xmm9,%xmm4
+	movdqa		%xmm4,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm4
+	por		%xmm0,%xmm4
+
+	dec		%ecx
+	jnz		.Ldoubleround4
+
+	# x0[0-3] += s0[0]
+	# x1[0-3] += s0[1]
+	movq		0x00(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		0x00(%rsp),%xmm2
+	movdqa		%xmm2,0x00(%rsp)
+	paddd		0x10(%rsp),%xmm3
+	movdqa		%xmm3,0x10(%rsp)
+	# x2[0-3] += s0[2]
+	# x3[0-3] += s0[3]
+	movq		0x08(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		0x20(%rsp),%xmm2
+	movdqa		%xmm2,0x20(%rsp)
+	paddd		0x30(%rsp),%xmm3
+	movdqa		%xmm3,0x30(%rsp)
+
+	# x4[0-3] += s1[0]
+	# x5[0-3] += s1[1]
+	movq		0x10(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm4
+	paddd		%xmm3,%xmm5
+	# x6[0-3] += s1[2]
+	# x7[0-3] += s1[3]
+	movq		0x18(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm6
+	paddd		%xmm3,%xmm7
+
+	# x8[0-3] += s2[0]
+	# x9[0-3] += s2[1]
+	movq		0x20(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm8
+	paddd		%xmm3,%xmm9
+	# x10[0-3] += s2[2]
+	# x11[0-3] += s2[3]
+	movq		0x28(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm10
+	paddd		%xmm3,%xmm11
+
+	# x12[0-3] += s3[0]
+	# x13[0-3] += s3[1]
+	movq		0x30(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm12
+	paddd		%xmm3,%xmm13
+	# x14[0-3] += s3[2]
+	# x15[0-3] += s3[3]
+	movq		0x38(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm14
+	paddd		%xmm3,%xmm15
+
+	# x12 += counter values 0-3
+	paddd		%xmm1,%xmm12
+
+	# interleave 32-bit words in state n, n+1
+	movdqa		0x00(%rsp),%xmm0
+	movdqa		0x10(%rsp),%xmm1
+	movdqa		%xmm0,%xmm2
+	punpckldq	%xmm1,%xmm2
+	punpckhdq	%xmm1,%xmm0
+	movdqa		%xmm2,0x00(%rsp)
+	movdqa		%xmm0,0x10(%rsp)
+	movdqa		0x20(%rsp),%xmm0
+	movdqa		0x30(%rsp),%xmm1
+	movdqa		%xmm0,%xmm2
+	punpckldq	%xmm1,%xmm2
+	punpckhdq	%xmm1,%xmm0
+	movdqa		%xmm2,0x20(%rsp)
+	movdqa		%xmm0,0x30(%rsp)
+	movdqa		%xmm4,%xmm0
+	punpckldq	%xmm5,%xmm4
+	punpckhdq	%xmm5,%xmm0
+	movdqa		%xmm0,%xmm5
+	movdqa		%xmm6,%xmm0
+	punpckldq	%xmm7,%xmm6
+	punpckhdq	%xmm7,%xmm0
+	movdqa		%xmm0,%xmm7
+	movdqa		%xmm8,%xmm0
+	punpckldq	%xmm9,%xmm8
+	punpckhdq	%xmm9,%xmm0
+	movdqa		%xmm0,%xmm9
+	movdqa		%xmm10,%xmm0
+	punpckldq	%xmm11,%xmm10
+	punpckhdq	%xmm11,%xmm0
+	movdqa		%xmm0,%xmm11
+	movdqa		%xmm12,%xmm0
+	punpckldq	%xmm13,%xmm12
+	punpckhdq	%xmm13,%xmm0
+	movdqa		%xmm0,%xmm13
+	movdqa		%xmm14,%xmm0
+	punpckldq	%xmm15,%xmm14
+	punpckhdq	%xmm15,%xmm0
+	movdqa		%xmm0,%xmm15
+
+	# interleave 64-bit words in state n, n+2
+	movdqa		0x00(%rsp),%xmm0
+	movdqa		0x20(%rsp),%xmm1
+	movdqa		%xmm0,%xmm2
+	punpcklqdq	%xmm1,%xmm2
+	punpckhqdq	%xmm1,%xmm0
+	movdqa		%xmm2,0x00(%rsp)
+	movdqa		%xmm0,0x20(%rsp)
+	movdqa		0x10(%rsp),%xmm0
+	movdqa		0x30(%rsp),%xmm1
+	movdqa		%xmm0,%xmm2
+	punpcklqdq	%xmm1,%xmm2
+	punpckhqdq	%xmm1,%xmm0
+	movdqa		%xmm2,0x10(%rsp)
+	movdqa		%xmm0,0x30(%rsp)
+	movdqa		%xmm4,%xmm0
+	punpcklqdq	%xmm6,%xmm4
+	punpckhqdq	%xmm6,%xmm0
+	movdqa		%xmm0,%xmm6
+	movdqa		%xmm5,%xmm0
+	punpcklqdq	%xmm7,%xmm5
+	punpckhqdq	%xmm7,%xmm0
+	movdqa		%xmm0,%xmm7
+	movdqa		%xmm8,%xmm0
+	punpcklqdq	%xmm10,%xmm8
+	punpckhqdq	%xmm10,%xmm0
+	movdqa		%xmm0,%xmm10
+	movdqa		%xmm9,%xmm0
+	punpcklqdq	%xmm11,%xmm9
+	punpckhqdq	%xmm11,%xmm0
+	movdqa		%xmm0,%xmm11
+	movdqa		%xmm12,%xmm0
+	punpcklqdq	%xmm14,%xmm12
+	punpckhqdq	%xmm14,%xmm0
+	movdqa		%xmm0,%xmm14
+	movdqa		%xmm13,%xmm0
+	punpcklqdq	%xmm15,%xmm13
+	punpckhqdq	%xmm15,%xmm0
+	movdqa		%xmm0,%xmm15
+
+	# xor with corresponding input, write to output
+	movdqa		0x00(%rsp),%xmm0
+	movdqu		0x00(%rdx),%xmm1
+	pxor		%xmm1,%xmm0
+	movdqu		%xmm0,0x00(%rsi)
+	movdqa		0x10(%rsp),%xmm0
+	movdqu		0x80(%rdx),%xmm1
+	pxor		%xmm1,%xmm0
+	movdqu		%xmm0,0x80(%rsi)
+	movdqa		0x20(%rsp),%xmm0
+	movdqu		0x40(%rdx),%xmm1
+	pxor		%xmm1,%xmm0
+	movdqu		%xmm0,0x40(%rsi)
+	movdqa		0x30(%rsp),%xmm0
+	movdqu		0xc0(%rdx),%xmm1
+	pxor		%xmm1,%xmm0
+	movdqu		%xmm0,0xc0(%rsi)
+	movdqu		0x10(%rdx),%xmm1
+	pxor		%xmm1,%xmm4
+	movdqu		%xmm4,0x10(%rsi)
+	movdqu		0x90(%rdx),%xmm1
+	pxor		%xmm1,%xmm5
+	movdqu		%xmm5,0x90(%rsi)
+	movdqu		0x50(%rdx),%xmm1
+	pxor		%xmm1,%xmm6
+	movdqu		%xmm6,0x50(%rsi)
+	movdqu		0xd0(%rdx),%xmm1
+	pxor		%xmm1,%xmm7
+	movdqu		%xmm7,0xd0(%rsi)
+	movdqu		0x20(%rdx),%xmm1
+	pxor		%xmm1,%xmm8
+	movdqu		%xmm8,0x20(%rsi)
+	movdqu		0xa0(%rdx),%xmm1
+	pxor		%xmm1,%xmm9
+	movdqu		%xmm9,0xa0(%rsi)
+	movdqu		0x60(%rdx),%xmm1
+	pxor		%xmm1,%xmm10
+	movdqu		%xmm10,0x60(%rsi)
+	movdqu		0xe0(%rdx),%xmm1
+	pxor		%xmm1,%xmm11
+	movdqu		%xmm11,0xe0(%rsi)
+	movdqu		0x30(%rdx),%xmm1
+	pxor		%xmm1,%xmm12
+	movdqu		%xmm12,0x30(%rsi)
+	movdqu		0xb0(%rdx),%xmm1
+	pxor		%xmm1,%xmm13
+	movdqu		%xmm13,0xb0(%rsi)
+	movdqu		0x70(%rdx),%xmm1
+	pxor		%xmm1,%xmm14
+	movdqu		%xmm14,0x70(%rsi)
+	movdqu		0xf0(%rdx),%xmm1
+	pxor		%xmm1,%xmm15
+	movdqu		%xmm15,0xf0(%rsi)
+
+	add		$0x40,%rsp
+	ret
+ENDPROC(chacha20_4block_xor_ssse3)
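SSE has no packed-rotate instruction, so the rounds above synthesize each rotl32: the 12- and 7-bit rotates use a pslld/psrld/por triple, while the byte-aligned 16- and 8-bit rotates collapse into a single pshufb through the shuffle masks kept in %xmm3 and %xmm2. A minimal C sketch of the quarter-round that each of these blocks implements (helper names are illustrative, not part of the patch):

#include <stdint.h>

/* rotl32() is what pslld/psrld/por compute; for n = 8 or 16 the asm
 * above uses one pshufb instead, since those rotates move whole bytes. */
static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha20 quarter-round over state words a, b, c, d. */
static void chacha20_quarterround(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 16);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 12);
	x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 8);
	x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 7);
}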

+ 150 - 0
arch/x86/crypto/chacha20_glue.c

@@ -0,0 +1,150 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/chacha20.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+#include <asm/simd.h>
+
+#define CHACHA20_STATE_ALIGN 16
+
+asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
+asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
+#ifdef CONFIG_AS_AVX2
+asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src);
+static bool chacha20_use_avx2;
+#endif
+
+static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
+			    unsigned int bytes)
+{
+	u8 buf[CHACHA20_BLOCK_SIZE];
+
+#ifdef CONFIG_AS_AVX2
+	if (chacha20_use_avx2) {
+		while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
+			chacha20_8block_xor_avx2(state, dst, src);
+			bytes -= CHACHA20_BLOCK_SIZE * 8;
+			src += CHACHA20_BLOCK_SIZE * 8;
+			dst += CHACHA20_BLOCK_SIZE * 8;
+			state[12] += 8;
+		}
+	}
+#endif
+	while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
+		chacha20_4block_xor_ssse3(state, dst, src);
+		bytes -= CHACHA20_BLOCK_SIZE * 4;
+		src += CHACHA20_BLOCK_SIZE * 4;
+		dst += CHACHA20_BLOCK_SIZE * 4;
+		state[12] += 4;
+	}
+	while (bytes >= CHACHA20_BLOCK_SIZE) {
+		chacha20_block_xor_ssse3(state, dst, src);
+		bytes -= CHACHA20_BLOCK_SIZE;
+		src += CHACHA20_BLOCK_SIZE;
+		dst += CHACHA20_BLOCK_SIZE;
+		state[12]++;
+	}
+	if (bytes) {
+		memcpy(buf, src, bytes);
+		chacha20_block_xor_ssse3(state, buf, buf);
+		memcpy(dst, buf, bytes);
+	}
+}
+
+static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
+			 struct scatterlist *src, unsigned int nbytes)
+{
+	u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
+	struct blkcipher_walk walk;
+	int err;
+
+	if (!may_use_simd())
+		return crypto_chacha20_crypt(desc, dst, src, nbytes);
+
+	state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
+
+	crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
+
+	kernel_fpu_begin();
+
+	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
+		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+				rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
+		err = blkcipher_walk_done(desc, &walk,
+					  walk.nbytes % CHACHA20_BLOCK_SIZE);
+	}
+
+	if (walk.nbytes) {
+		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+				walk.nbytes);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+
+	kernel_fpu_end();
+
+	return err;
+}
+
+static struct crypto_alg alg = {
+	.cra_name		= "chacha20",
+	.cra_driver_name	= "chacha20-simd",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_ctxsize		= sizeof(struct chacha20_ctx),
+	.cra_alignmask		= sizeof(u32) - 1,
+	.cra_module		= THIS_MODULE,
+	.cra_u			= {
+		.blkcipher = {
+			.min_keysize	= CHACHA20_KEY_SIZE,
+			.max_keysize	= CHACHA20_KEY_SIZE,
+			.ivsize		= CHACHA20_IV_SIZE,
+			.geniv		= "seqiv",
+			.setkey		= crypto_chacha20_setkey,
+			.encrypt	= chacha20_simd,
+			.decrypt	= chacha20_simd,
+		},
+	},
+};
+
+static int __init chacha20_simd_mod_init(void)
+{
+	if (!cpu_has_ssse3)
+		return -ENODEV;
+
+#ifdef CONFIG_AS_AVX2
+	chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
+			    cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
+#endif
+	return crypto_register_alg(&alg);
+}
+
+static void __exit chacha20_simd_mod_fini(void)
+{
+	crypto_unregister_alg(&alg);
+}
+
+module_init(chacha20_simd_mod_init);
+module_exit(chacha20_simd_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-simd");
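The state[12] updates in chacha20_dosimd() track the block counter: each N-block assembler call consumes counters state[12] through state[12]+N-1 (the 4-block routine adds the per-lane values 0-3 to x12 itself), so the glue bumps that word by N afterwards. The 16-word state that crypto_chacha20_init() hands in follows the RFC 7539 layout; a rough sketch, assuming a little-endian host (illustrative only):

#include <stdint.h>
#include <string.h>

/* Sketch of the RFC 7539 state layout: four constant words, eight
 * key words, then counter and nonce from the 16-byte IV.  state[12]
 * is the block counter that chacha20_dosimd() advances. */
static void chacha20_state_sketch(uint32_t state[16],
				  const uint8_t key[32], const uint8_t iv[16])
{
	static const uint32_t c[4] = {	/* "expand 32-byte k" */
		0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
	};

	memcpy(state, c, sizeof(c));
	memcpy(state + 4, key, 32);	/* little-endian loads assumed */
	memcpy(state + 12, iv, 16);	/* counter (state[12]) + nonce */
}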

+ 386 - 0
arch/x86/crypto/poly1305-avx2-x86_64.S

@@ -0,0 +1,386 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 32
+
+ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
+	.octa 0x0000000003ffffff0000000003ffffff
+ORMASK:	.octa 0x00000000010000000000000001000000
+	.octa 0x00000000010000000000000001000000
+
+.text
+
+#define h0 0x00(%rdi)
+#define h1 0x04(%rdi)
+#define h2 0x08(%rdi)
+#define h3 0x0c(%rdi)
+#define h4 0x10(%rdi)
+#define r0 0x00(%rdx)
+#define r1 0x04(%rdx)
+#define r2 0x08(%rdx)
+#define r3 0x0c(%rdx)
+#define r4 0x10(%rdx)
+#define u0 0x00(%r8)
+#define u1 0x04(%r8)
+#define u2 0x08(%r8)
+#define u3 0x0c(%r8)
+#define u4 0x10(%r8)
+#define w0 0x14(%r8)
+#define w1 0x18(%r8)
+#define w2 0x1c(%r8)
+#define w3 0x20(%r8)
+#define w4 0x24(%r8)
+#define y0 0x28(%r8)
+#define y1 0x2c(%r8)
+#define y2 0x30(%r8)
+#define y3 0x34(%r8)
+#define y4 0x38(%r8)
+#define m %rsi
+#define hc0 %ymm0
+#define hc1 %ymm1
+#define hc2 %ymm2
+#define hc3 %ymm3
+#define hc4 %ymm4
+#define hc0x %xmm0
+#define hc1x %xmm1
+#define hc2x %xmm2
+#define hc3x %xmm3
+#define hc4x %xmm4
+#define t1 %ymm5
+#define t2 %ymm6
+#define t1x %xmm5
+#define t2x %xmm6
+#define ruwy0 %ymm7
+#define ruwy1 %ymm8
+#define ruwy2 %ymm9
+#define ruwy3 %ymm10
+#define ruwy4 %ymm11
+#define ruwy0x %xmm7
+#define ruwy1x %xmm8
+#define ruwy2x %xmm9
+#define ruwy3x %xmm10
+#define ruwy4x %xmm11
+#define svxz1 %ymm12
+#define svxz2 %ymm13
+#define svxz3 %ymm14
+#define svxz4 %ymm15
+#define d0 %r9
+#define d1 %r10
+#define d2 %r11
+#define d3 %r12
+#define d4 %r13
+
+ENTRY(poly1305_4block_avx2)
+	# %rdi: Accumulator h[5]
+	# %rsi: 64 byte input block m
+	# %rdx: Poly1305 key r[5]
+	# %rcx: Quadblock count
+	# %r8:  Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5]
+
+	# This four-block variant uses loop-unrolled block processing. It
+	# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
+	# h = (h + m) * r  =>  h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
+
+	vzeroupper
+	push		%rbx
+	push		%r12
+	push		%r13
+
+	# combine r0,u0,w0,y0
+	vmovd		y0,ruwy0x
+	vmovd		w0,t1x
+	vpunpcklqdq	t1,ruwy0,ruwy0
+	vmovd		u0,t1x
+	vmovd		r0,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy0,ruwy0
+
+	# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
+	vmovd		y1,ruwy1x
+	vmovd		w1,t1x
+	vpunpcklqdq	t1,ruwy1,ruwy1
+	vmovd		u1,t1x
+	vmovd		r1,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy1,ruwy1
+	vpslld		$2,ruwy1,svxz1
+	vpaddd		ruwy1,svxz1,svxz1
+
+	# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
+	vmovd		y2,ruwy2x
+	vmovd		w2,t1x
+	vpunpcklqdq	t1,ruwy2,ruwy2
+	vmovd		u2,t1x
+	vmovd		r2,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy2,ruwy2
+	vpslld		$2,ruwy2,svxz2
+	vpaddd		ruwy2,svxz2,svxz2
+
+	# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
+	vmovd		y3,ruwy3x
+	vmovd		w3,t1x
+	vpunpcklqdq	t1,ruwy3,ruwy3
+	vmovd		u3,t1x
+	vmovd		r3,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy3,ruwy3
+	vpslld		$2,ruwy3,svxz3
+	vpaddd		ruwy3,svxz3,svxz3
+
+	# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
+	vmovd		y4,ruwy4x
+	vmovd		w4,t1x
+	vpunpcklqdq	t1,ruwy4,ruwy4
+	vmovd		u4,t1x
+	vmovd		r4,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy4,ruwy4
+	vpslld		$2,ruwy4,svxz4
+	vpaddd		ruwy4,svxz4,svxz4
+
+.Ldoblock4:
+	# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
+	#	 m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
+	vmovd		0x00(m),hc0x
+	vmovd		0x10(m),t1x
+	vpunpcklqdq	t1,hc0,hc0
+	vmovd		0x20(m),t1x
+	vmovd		0x30(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc0,hc0
+	vpand		ANMASK(%rip),hc0,hc0
+	vmovd		h0,t1x
+	vpaddd		t1,hc0,hc0
+	# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
+	#	 (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
+	vmovd		0x03(m),hc1x
+	vmovd		0x13(m),t1x
+	vpunpcklqdq	t1,hc1,hc1
+	vmovd		0x23(m),t1x
+	vmovd		0x33(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc1,hc1
+	vpsrld		$2,hc1,hc1
+	vpand		ANMASK(%rip),hc1,hc1
+	vmovd		h1,t1x
+	vpaddd		t1,hc1,hc1
+	# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
+	#	 (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
+	vmovd		0x06(m),hc2x
+	vmovd		0x16(m),t1x
+	vpunpcklqdq	t1,hc2,hc2
+	vmovd		0x26(m),t1x
+	vmovd		0x36(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc2,hc2
+	vpsrld		$4,hc2,hc2
+	vpand		ANMASK(%rip),hc2,hc2
+	vmovd		h2,t1x
+	vpaddd		t1,hc2,hc2
+	# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
+	#	 (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
+	vmovd		0x09(m),hc3x
+	vmovd		0x19(m),t1x
+	vpunpcklqdq	t1,hc3,hc3
+	vmovd		0x29(m),t1x
+	vmovd		0x39(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc3,hc3
+	vpsrld		$6,hc3,hc3
+	vpand		ANMASK(%rip),hc3,hc3
+	vmovd		h3,t1x
+	vpaddd		t1,hc3,hc3
+	# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
+	#	 (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
+	vmovd		0x0c(m),hc4x
+	vmovd		0x1c(m),t1x
+	vpunpcklqdq	t1,hc4,hc4
+	vmovd		0x2c(m),t1x
+	vmovd		0x3c(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc4,hc4
+	vpsrld		$8,hc4,hc4
+	vpor		ORMASK(%rip),hc4,hc4
+	vmovd		h4,t1x
+	vpaddd		t1,hc4,hc4
+
+	# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
+	vpmuludq	hc0,ruwy0,t1
+	# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
+	vpmuludq	hc1,svxz4,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
+	vpmuludq	hc2,svxz3,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
+	vpmuludq	hc3,svxz2,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
+	vpmuludq	hc4,svxz1,t2
+	vpaddq		t2,t1,t1
+	# d0 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d0
+
+	# t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
+	vpmuludq	hc0,ruwy1,t1
+	# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
+	vpmuludq	hc1,ruwy0,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
+	vpmuludq	hc2,svxz4,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
+	vpmuludq	hc3,svxz3,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
+	vpmuludq	hc4,svxz2,t2
+	vpaddq		t2,t1,t1
+	# d1 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d1
+
+	# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
+	vpmuludq	hc0,ruwy2,t1
+	# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
+	vpmuludq	hc1,ruwy1,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
+	vpmuludq	hc2,ruwy0,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
+	vpmuludq	hc3,svxz4,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
+	vpmuludq	hc4,svxz3,t2
+	vpaddq		t2,t1,t1
+	# d2 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d2
+
+	# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
+	vpmuludq	hc0,ruwy3,t1
+	# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
+	vpmuludq	hc1,ruwy2,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
+	vpmuludq	hc2,ruwy1,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
+	vpmuludq	hc3,ruwy0,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
+	vpmuludq	hc4,svxz4,t2
+	vpaddq		t2,t1,t1
+	# d3 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d3
+
+	# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
+	vpmuludq	hc0,ruwy4,t1
+	# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
+	vpmuludq	hc1,ruwy3,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
+	vpmuludq	hc2,ruwy2,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
+	vpmuludq	hc3,ruwy1,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
+	vpmuludq	hc4,ruwy0,t2
+	vpaddq		t2,t1,t1
+	# d4 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d4
+
+	# d1 += d0 >> 26
+	mov		d0,%rax
+	shr		$26,%rax
+	add		%rax,d1
+	# h0 = d0 & 0x3ffffff
+	mov		d0,%rbx
+	and		$0x3ffffff,%ebx
+
+	# d2 += d1 >> 26
+	mov		d1,%rax
+	shr		$26,%rax
+	add		%rax,d2
+	# h1 = d1 & 0x3ffffff
+	mov		d1,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h1
+
+	# d3 += d2 >> 26
+	mov		d2,%rax
+	shr		$26,%rax
+	add		%rax,d3
+	# h2 = d2 & 0x3ffffff
+	mov		d2,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h2
+
+	# d4 += d3 >> 26
+	mov		d3,%rax
+	shr		$26,%rax
+	add		%rax,d4
+	# h3 = d3 & 0x3ffffff
+	mov		d3,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h3
+
+	# h0 += (d4 >> 26) * 5
+	mov		d4,%rax
+	shr		$26,%rax
+	lea		(%eax,%eax,4),%eax
+	add		%eax,%ebx
+	# h4 = d4 & 0x3ffffff
+	mov		d4,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h4
+
+	# h1 += h0 >> 26
+	mov		%ebx,%eax
+	shr		$26,%eax
+	add		%eax,h1
+	# h0 = h0 & 0x3ffffff
+	andl		$0x3ffffff,%ebx
+	mov		%ebx,h0
+
+	add		$0x40,m
+	dec		%rcx
+	jnz		.Ldoblock4
+
+	vzeroupper
+	pop		%r13
+	pop		%r12
+	pop		%rbx
+	ret
+ENDPROC(poly1305_4block_avx2)
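The recurrence in the header comment is Horner evaluation unrolled four ways: expanding h = (h + m) * r over four consecutive blocks gives h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r, which is exactly why the routine needs r, r^2, r^3 and r^4. A toy C sketch of that equivalence; P is a stand-in modulus small enough for __int128 arithmetic, not the real 2^130 - 5 limb code:

#include <stdint.h>

#define P 0x1fffffffffffffffULL		/* toy modulus (2^61 - 1) */

static uint64_t mulmod(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) % P);
}

/* Reference: four sequential Poly1305 steps, h = (h + m[i]) * r. */
static uint64_t poly_seq(uint64_t h, const uint64_t m[4], uint64_t r)
{
	for (int i = 0; i < 4; i++)
		h = mulmod((h + m[i]) % P, r);
	return h;
}

/* Unrolled form used above: one pass over r^4..r^1.  All inputs are
 * assumed already reduced below P. */
static uint64_t poly_4block(uint64_t h, const uint64_t m[4], uint64_t r)
{
	uint64_t r2 = mulmod(r, r), r3 = mulmod(r2, r), r4 = mulmod(r3, r);
	uint64_t t = mulmod((h + m[0]) % P, r4);

	t = (t + mulmod(m[1], r3)) % P;
	t = (t + mulmod(m[2], r2)) % P;
	t = (t + mulmod(m[3], r)) % P;
	return t;			/* equals poly_seq(h, m, r) */
}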

+ 582 - 0
arch/x86/crypto/poly1305-sse2-x86_64.S

@@ -0,0 +1,582 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 16
+
+ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
+ORMASK:	.octa 0x00000000010000000000000001000000
+
+.text
+
+#define h0 0x00(%rdi)
+#define h1 0x04(%rdi)
+#define h2 0x08(%rdi)
+#define h3 0x0c(%rdi)
+#define h4 0x10(%rdi)
+#define r0 0x00(%rdx)
+#define r1 0x04(%rdx)
+#define r2 0x08(%rdx)
+#define r3 0x0c(%rdx)
+#define r4 0x10(%rdx)
+#define s1 0x00(%rsp)
+#define s2 0x04(%rsp)
+#define s3 0x08(%rsp)
+#define s4 0x0c(%rsp)
+#define m %rsi
+#define h01 %xmm0
+#define h23 %xmm1
+#define h44 %xmm2
+#define t1 %xmm3
+#define t2 %xmm4
+#define t3 %xmm5
+#define t4 %xmm6
+#define mask %xmm7
+#define d0 %r8
+#define d1 %r9
+#define d2 %r10
+#define d3 %r11
+#define d4 %r12
+
+ENTRY(poly1305_block_sse2)
+	# %rdi: Accumulator h[5]
+	# %rsi: 16 byte input block m
+	# %rdx: Poly1305 key r[5]
+	# %rcx: Block count
+
+	# This single block variant tries to improve performance by doing two
+	# multiplications in parallel using SSE instructions. There is quite
+	# some quadword packing involved, hence the speedup is marginal.
+
+	push		%rbx
+	push		%r12
+	sub		$0x10,%rsp
+
+	# s1..s4 = r1..r4 * 5
+	mov		r1,%eax
+	lea		(%eax,%eax,4),%eax
+	mov		%eax,s1
+	mov		r2,%eax
+	lea		(%eax,%eax,4),%eax
+	mov		%eax,s2
+	mov		r3,%eax
+	lea		(%eax,%eax,4),%eax
+	mov		%eax,s3
+	mov		r4,%eax
+	lea		(%eax,%eax,4),%eax
+	mov		%eax,s4
+
+	movdqa		ANMASK(%rip),mask
+
+.Ldoblock:
+	# h01 = [0, h1, 0, h0]
+	# h23 = [0, h3, 0, h2]
+	# h44 = [0, h4, 0, h4]
+	movd		h0,h01
+	movd		h1,t1
+	movd		h2,h23
+	movd		h3,t2
+	movd		h4,h44
+	punpcklqdq	t1,h01
+	punpcklqdq	t2,h23
+	punpcklqdq	h44,h44
+
+	# h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ]
+	movd		0x00(m),t1
+	movd		0x03(m),t2
+	psrld		$2,t2
+	punpcklqdq	t2,t1
+	pand		mask,t1
+	paddd		t1,h01
+	# h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ]
+	movd		0x06(m),t1
+	movd		0x09(m),t2
+	psrld		$4,t1
+	psrld		$6,t2
+	punpcklqdq	t2,t1
+	pand		mask,t1
+	paddd		t1,h23
+	# h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ]
+	mov		0x0c(m),%eax
+	shr		$8,%eax
+	or		$0x01000000,%eax
+	movd		%eax,t1
+	pshufd		$0xc4,t1,t1
+	paddd		t1,h44
+
+	# t1[0] = h0 * r0 + h2 * s3
+	# t1[1] = h1 * s4 + h3 * s2
+	movd		r0,t1
+	movd		s4,t2
+	punpcklqdq	t2,t1
+	pmuludq		h01,t1
+	movd		s3,t2
+	movd		s2,t3
+	punpcklqdq	t3,t2
+	pmuludq		h23,t2
+	paddq		t2,t1
+	# t2[0] = h0 * r1 + h2 * s4
+	# t2[1] = h1 * r0 + h3 * s3
+	movd		r1,t2
+	movd		r0,t3
+	punpcklqdq	t3,t2
+	pmuludq		h01,t2
+	movd		s4,t3
+	movd		s3,t4
+	punpcklqdq	t4,t3
+	pmuludq		h23,t3
+	paddq		t3,t2
+	# t3[0] = h4 * s1
+	# t3[1] = h4 * s2
+	movd		s1,t3
+	movd		s2,t4
+	punpcklqdq	t4,t3
+	pmuludq		h44,t3
+	# d0 = t1[0] + t1[1] + t3[0]
+	# d1 = t2[0] + t2[1] + t3[1]
+	movdqa		t1,t4
+	punpcklqdq	t2,t4
+	punpckhqdq	t2,t1
+	paddq		t4,t1
+	paddq		t3,t1
+	movq		t1,d0
+	psrldq		$8,t1
+	movq		t1,d1
+
+	# t1[0] = h0 * r2 + h2 * r0
+	# t1[1] = h1 * r1 + h3 * s4
+	movd		r2,t1
+	movd		r1,t2
+	punpcklqdq 	t2,t1
+	pmuludq		h01,t1
+	movd		r0,t2
+	movd		s4,t3
+	punpcklqdq	t3,t2
+	pmuludq		h23,t2
+	paddq		t2,t1
+	# t2[0] = h0 * r3 + h2 * r1
+	# t2[1] = h1 * r2 + h3 * r0
+	movd		r3,t2
+	movd		r2,t3
+	punpcklqdq	t3,t2
+	pmuludq		h01,t2
+	movd		r1,t3
+	movd		r0,t4
+	punpcklqdq	t4,t3
+	pmuludq		h23,t3
+	paddq		t3,t2
+	# t3[0] = h4 * s3
+	# t3[1] = h4 * s4
+	movd		s3,t3
+	movd		s4,t4
+	punpcklqdq	t4,t3
+	pmuludq		h44,t3
+	# d2 = t1[0] + t1[1] + t3[0]
+	# d3 = t2[0] + t2[1] + t3[1]
+	movdqa		t1,t4
+	punpcklqdq	t2,t4
+	punpckhqdq	t2,t1
+	paddq		t4,t1
+	paddq		t3,t1
+	movq		t1,d2
+	psrldq		$8,t1
+	movq		t1,d3
+
+	# t1[0] = h0 * r4 + h2 * r2
+	# t1[1] = h1 * r3 + h3 * r1
+	movd		r4,t1
+	movd		r3,t2
+	punpcklqdq	t2,t1
+	pmuludq		h01,t1
+	movd		r2,t2
+	movd		r1,t3
+	punpcklqdq	t3,t2
+	pmuludq		h23,t2
+	paddq		t2,t1
+	# t3[0] = h4 * r0
+	movd		r0,t3
+	pmuludq		h44,t3
+	# d4 = t1[0] + t1[1] + t3[0]
+	movdqa		t1,t4
+	psrldq		$8,t4
+	paddq		t4,t1
+	paddq		t3,t1
+	movq		t1,d4
+
+	# d1 += d0 >> 26
+	mov		d0,%rax
+	shr		$26,%rax
+	add		%rax,d1
+	# h0 = d0 & 0x3ffffff
+	mov		d0,%rbx
+	and		$0x3ffffff,%ebx
+
+	# d2 += d1 >> 26
+	mov		d1,%rax
+	shr		$26,%rax
+	add		%rax,d2
+	# h1 = d1 & 0x3ffffff
+	mov		d1,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h1
+
+	# d3 += d2 >> 26
+	mov		d2,%rax
+	shr		$26,%rax
+	add		%rax,d3
+	# h2 = d2 & 0x3ffffff
+	mov		d2,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h2
+
+	# d4 += d3 >> 26
+	mov		d3,%rax
+	shr		$26,%rax
+	add		%rax,d4
+	# h3 = d3 & 0x3ffffff
+	mov		d3,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h3
+
+	# h0 += (d4 >> 26) * 5
+	mov		d4,%rax
+	shr		$26,%rax
+	lea		(%eax,%eax,4),%eax
+	add		%eax,%ebx
+	# h4 = d4 & 0x3ffffff
+	mov		d4,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h4
+
+	# h1 += h0 >> 26
+	mov		%ebx,%eax
+	shr		$26,%eax
+	add		%eax,h1
+	# h0 = h0 & 0x3ffffff
+	andl		$0x3ffffff,%ebx
+	mov		%ebx,h0
+
+	add		$0x10,m
+	dec		%rcx
+	jnz		.Ldoblock
+
+	add		$0x10,%rsp
+	pop		%r12
+	pop		%rbx
+	ret
+ENDPROC(poly1305_block_sse2)
+
+
+#define u0 0x00(%r8)
+#define u1 0x04(%r8)
+#define u2 0x08(%r8)
+#define u3 0x0c(%r8)
+#define u4 0x10(%r8)
+#define hc0 %xmm0
+#define hc1 %xmm1
+#define hc2 %xmm2
+#define hc3 %xmm5
+#define hc4 %xmm6
+#define ru0 %xmm7
+#define ru1 %xmm8
+#define ru2 %xmm9
+#define ru3 %xmm10
+#define ru4 %xmm11
+#define sv1 %xmm12
+#define sv2 %xmm13
+#define sv3 %xmm14
+#define sv4 %xmm15
+#undef d0
+#define d0 %r13
+
+ENTRY(poly1305_2block_sse2)
+	# %rdi: Accumulator h[5]
+	# %rsi: 16 byte input block m
+	# %rdx: Poly1305 key r[5]
+	# %rcx: Doubleblock count
+	# %r8:  Poly1305 derived key r^2 u[5]
+
+	# This two-block variant further improves performance by using loop-
+	# unrolled block processing. This is more straightforward and does
+	# less byte shuffling, but requires a second Poly1305 key r^2:
+	# h = (h + m) * r    =>    h = (h + m1) * r^2 + m2 * r
+
+	push		%rbx
+	push		%r12
+	push		%r13
+
+	# combine r0,u0
+	movd		u0,ru0
+	movd		r0,t1
+	punpcklqdq	t1,ru0
+
+	# combine r1,u1 and s1=r1*5,v1=u1*5
+	movd		u1,ru1
+	movd		r1,t1
+	punpcklqdq	t1,ru1
+	movdqa		ru1,sv1
+	pslld		$2,sv1
+	paddd		ru1,sv1
+
+	# combine r2,u2 and s2=r2*5,v2=u2*5
+	movd		u2,ru2
+	movd		r2,t1
+	punpcklqdq	t1,ru2
+	movdqa		ru2,sv2
+	pslld		$2,sv2
+	paddd		ru2,sv2
+
+	# combine r3,u3 and s3=r3*5,v3=u3*5
+	movd		u3,ru3
+	movd		r3,t1
+	punpcklqdq	t1,ru3
+	movdqa		ru3,sv3
+	pslld		$2,sv3
+	paddd		ru3,sv3
+
+	# combine r4,u4 and s4=r4*5,v4=u4*5
+	movd		u4,ru4
+	movd		r4,t1
+	punpcklqdq	t1,ru4
+	movdqa		ru4,sv4
+	pslld		$2,sv4
+	paddd		ru4,sv4
+
+.Ldoblock2:
+	# hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ]
+	movd		0x00(m),hc0
+	movd		0x10(m),t1
+	punpcklqdq	t1,hc0
+	pand		ANMASK(%rip),hc0
+	movd		h0,t1
+	paddd		t1,hc0
+	# hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ]
+	movd		0x03(m),hc1
+	movd		0x13(m),t1
+	punpcklqdq	t1,hc1
+	psrld		$2,hc1
+	pand		ANMASK(%rip),hc1
+	movd		h1,t1
+	paddd		t1,hc1
+	# hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ]
+	movd		0x06(m),hc2
+	movd		0x16(m),t1
+	punpcklqdq	t1,hc2
+	psrld		$4,hc2
+	pand		ANMASK(%rip),hc2
+	movd		h2,t1
+	paddd		t1,hc2
+	# hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ]
+	movd		0x09(m),hc3
+	movd		0x19(m),t1
+	punpcklqdq	t1,hc3
+	psrld		$6,hc3
+	pand		ANMASK(%rip),hc3
+	movd		h3,t1
+	paddd		t1,hc3
+	# hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ]
+	movd		0x0c(m),hc4
+	movd		0x1c(m),t1
+	punpcklqdq	t1,hc4
+	psrld		$8,hc4
+	por		ORMASK(%rip),hc4
+	movd		h4,t1
+	paddd		t1,hc4
+
+	# t1 = [ hc0[1] * r0, hc0[0] * u0 ]
+	movdqa		ru0,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * s4, hc1[0] * v4 ]
+	movdqa		sv4,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * s3, hc2[0] * v3 ]
+	movdqa		sv3,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * s2, hc3[0] * v2 ]
+	movdqa		sv2,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * s1, hc4[0] * v1 ]
+	movdqa		sv1,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d0 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d0
+
+	# t1 = [ hc0[1] * r1, hc0[0] * u1 ]
+	movdqa		ru1,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * r0, hc1[0] * u0 ]
+	movdqa		ru0,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * s4, hc2[0] * v4 ]
+	movdqa		sv4,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * s3, hc3[0] * v3 ]
+	movdqa		sv3,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * s2, hc4[0] * v2 ]
+	movdqa		sv2,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d1 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d1
+
+	# t1 = [ hc0[1] * r2, hc0[0] * u2 ]
+	movdqa		ru2,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * r1, hc1[0] * u1 ]
+	movdqa		ru1,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * r0, hc2[0] * u0 ]
+	movdqa		ru0,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * s4, hc3[0] * v4 ]
+	movdqa		sv4,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * s3, hc4[0] * v3 ]
+	movdqa		sv3,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d2 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d2
+
+	# t1 = [ hc0[1] * r3, hc0[0] * u3 ]
+	movdqa		ru3,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * r2, hc1[0] * u2 ]
+	movdqa		ru2,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * r1, hc2[0] * u1 ]
+	movdqa		ru1,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * r0, hc3[0] * u0 ]
+	movdqa		ru0,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * s4, hc4[0] * v4 ]
+	movdqa		sv4,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d3 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d3
+
+	# t1 = [ hc0[1] * r4, hc0[0] * u4 ]
+	movdqa		ru4,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * r3, hc1[0] * u3 ]
+	movdqa		ru3,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * r2, hc2[0] * u2 ]
+	movdqa		ru2,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * r1, hc3[0] * u1 ]
+	movdqa		ru1,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * r0, hc4[0] * u0 ]
+	movdqa		ru0,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d4 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d4
+
+	# d1 += d0 >> 26
+	mov		d0,%rax
+	shr		$26,%rax
+	add		%rax,d1
+	# h0 = d0 & 0x3ffffff
+	mov		d0,%rbx
+	and		$0x3ffffff,%ebx
+
+	# d2 += d1 >> 26
+	mov		d1,%rax
+	shr		$26,%rax
+	add		%rax,d2
+	# h1 = d1 & 0x3ffffff
+	mov		d1,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h1
+
+	# d3 += d2 >> 26
+	mov		d2,%rax
+	shr		$26,%rax
+	add		%rax,d3
+	# h2 = d2 & 0x3ffffff
+	mov		d2,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h2
+
+	# d4 += d3 >> 26
+	mov		d3,%rax
+	shr		$26,%rax
+	add		%rax,d4
+	# h3 = d3 & 0x3ffffff
+	mov		d3,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h3
+
+	# h0 += (d4 >> 26) * 5
+	mov		d4,%rax
+	shr		$26,%rax
+	lea		(%eax,%eax,4),%eax
+	add		%eax,%ebx
+	# h4 = d4 & 0x3ffffff
+	mov		d4,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h4
+
+	# h1 += h0 >> 26
+	mov		%ebx,%eax
+	shr		$26,%eax
+	add		%eax,h1
+	# h0 = h0 & 0x3ffffff
+	andl		$0x3ffffff,%ebx
+	mov		%ebx,h0
+
+	add		$0x20,m
+	dec		%rcx
+	jnz		.Ldoblock2
+
+	pop		%r13
+	pop		%r12
+	pop		%rbx
+	ret
+ENDPROC(poly1305_2block_sse2)
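Two idioms recur in the scalar tails of both routines above. The lea (%eax,%eax,4),%eax and the pslld $2 / paddd pairs each compute x * 5: since 2^130 = 5 (mod 2^130 - 5), any limb product that wraps past bit 130 is folded back by multiplying by 5, which is what the precomputed s1..s4 = r1..r4 * 5 (and v/x/z in the wider variants) exist for. The d0..d4 blocks then perform a partial carry propagation over the five 26-bit limbs; in C, roughly (illustrative helper, not part of the patch):

#include <stdint.h>

/* Carry chain matching the d0..d4 tails above: squeeze each 64-bit
 * product into a 26-bit limb, folding the top carry back via *5
 * because 2^130 = 5 (mod 2^130 - 5). */
static void poly1305_carry_sketch(uint64_t d[5], uint32_t h[5])
{
	for (int i = 0; i < 4; i++) {
		d[i + 1] += d[i] >> 26;
		h[i] = (uint32_t)d[i] & 0x3ffffff;
	}
	h[0] += (uint32_t)(d[4] >> 26) * 5;	/* the lea (%eax,%eax,4) */
	h[4] = (uint32_t)d[4] & 0x3ffffff;

	h[1] += h[0] >> 26;			/* one final carry, as above */
	h[0] &= 0x3ffffff;
}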

+ 207 - 0
arch/x86/crypto/poly1305_glue.c

@@ -0,0 +1,207 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/poly1305.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+#include <asm/simd.h>
+
+struct poly1305_simd_desc_ctx {
+	struct poly1305_desc_ctx base;
+	/* derived key u set? */
+	bool uset;
+#ifdef CONFIG_AS_AVX2
+	/* derived keys r^3, r^4 set? */
+	bool wset;
+#endif
+	/* derived Poly1305 key r^2 */
+	u32 u[5];
+	/* ... silently appended r^3 and r^4 when using AVX2 */
+};
+
+asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src,
+				    const u32 *r, unsigned int blocks);
+asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r,
+				     unsigned int blocks, const u32 *u);
+#ifdef CONFIG_AS_AVX2
+asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r,
+				     unsigned int blocks, const u32 *u);
+static bool poly1305_use_avx2;
+#endif
+
+static int poly1305_simd_init(struct shash_desc *desc)
+{
+	struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc);
+
+	sctx->uset = false;
+#ifdef CONFIG_AS_AVX2
+	sctx->wset = false;
+#endif
+
+	return crypto_poly1305_init(desc);
+}
+
+static void poly1305_simd_mult(u32 *a, const u32 *b)
+{
+	u8 m[POLY1305_BLOCK_SIZE];
+
+	memset(m, 0, sizeof(m));
+	/* The poly1305 block function adds a hi-bit to the accumulator which
+	 * we don't need for key multiplication; compensate for it. */
+	a[4] -= 1 << 24;
+	poly1305_block_sse2(a, m, b, 1);
+}
+
+static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
+					 const u8 *src, unsigned int srclen)
+{
+	struct poly1305_simd_desc_ctx *sctx;
+	unsigned int blocks, datalen;
+
+	BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base));
+	sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base);
+
+	if (unlikely(!dctx->sset)) {
+		datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
+		src += srclen - datalen;
+		srclen = datalen;
+	}
+
+#ifdef CONFIG_AS_AVX2
+	if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) {
+		if (unlikely(!sctx->wset)) {
+			if (!sctx->uset) {
+				memcpy(sctx->u, dctx->r, sizeof(sctx->u));
+				poly1305_simd_mult(sctx->u, dctx->r);
+				sctx->uset = true;
+			}
+			memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
+			poly1305_simd_mult(sctx->u + 5, dctx->r);
+			memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
+			poly1305_simd_mult(sctx->u + 10, dctx->r);
+			sctx->wset = true;
+		}
+		blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
+		poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u);
+		src += POLY1305_BLOCK_SIZE * 4 * blocks;
+		srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
+	}
+#endif
+	if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
+		if (unlikely(!sctx->uset)) {
+			memcpy(sctx->u, dctx->r, sizeof(sctx->u));
+			poly1305_simd_mult(sctx->u, dctx->r);
+			sctx->uset = true;
+		}
+		blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
+		poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u);
+		src += POLY1305_BLOCK_SIZE * 2 * blocks;
+		srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
+	}
+	if (srclen >= POLY1305_BLOCK_SIZE) {
+		poly1305_block_sse2(dctx->h, src, dctx->r, 1);
+		srclen -= POLY1305_BLOCK_SIZE;
+	}
+	return srclen;
+}
+
+static int poly1305_simd_update(struct shash_desc *desc,
+				const u8 *src, unsigned int srclen)
+{
+	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+	unsigned int bytes;
+
+	/* kernel_fpu_begin/end is costly, use fallback for small updates */
+	if (srclen <= 288 || !may_use_simd())
+		return crypto_poly1305_update(desc, src, srclen);
+
+	kernel_fpu_begin();
+
+	if (unlikely(dctx->buflen)) {
+		bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
+		memcpy(dctx->buf + dctx->buflen, src, bytes);
+		src += bytes;
+		srclen -= bytes;
+		dctx->buflen += bytes;
+
+		if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+			poly1305_simd_blocks(dctx, dctx->buf,
+					     POLY1305_BLOCK_SIZE);
+			dctx->buflen = 0;
+		}
+	}
+
+	if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
+		bytes = poly1305_simd_blocks(dctx, src, srclen);
+		src += srclen - bytes;
+		srclen = bytes;
+	}
+
+	kernel_fpu_end();
+
+	if (unlikely(srclen)) {
+		dctx->buflen = srclen;
+		memcpy(dctx->buf, src, srclen);
+	}
+
+	return 0;
+}
+
+static struct shash_alg alg = {
+	.digestsize	= POLY1305_DIGEST_SIZE,
+	.init		= poly1305_simd_init,
+	.update		= poly1305_simd_update,
+	.final		= crypto_poly1305_final,
+	.setkey		= crypto_poly1305_setkey,
+	.descsize	= sizeof(struct poly1305_simd_desc_ctx),
+	.base		= {
+		.cra_name		= "poly1305",
+		.cra_driver_name	= "poly1305-simd",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_alignmask		= sizeof(u32) - 1,
+		.cra_blocksize		= POLY1305_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	},
+};
+
+static int __init poly1305_simd_mod_init(void)
+{
+	if (!cpu_has_xmm2)
+		return -ENODEV;
+
+#ifdef CONFIG_AS_AVX2
+	poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
+			    cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
+	alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
+	if (poly1305_use_avx2)
+		alg.descsize += 10 * sizeof(u32);
+#endif
+	return crypto_register_shash(&alg);
+}
+
+static void __exit poly1305_simd_mod_exit(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(poly1305_simd_mod_init);
+module_exit(poly1305_simd_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Poly1305 authenticator");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-simd");
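The descsize adjustment in poly1305_simd_mod_init() works because the derived keys sit back to back after u[5]: sctx->u holds r^2, and the ten extra u32s reserved when AVX2 is usable hold r^3 (sctx->u + 5) and r^4 (sctx->u + 10), the u/w/y arrays that poly1305_4block_avx2 expects through %r8. Spelled out as a hypothetical struct (the kernel never declares this; it only grows alg.descsize):

/* Effective per-request layout when poly1305_use_avx2 is set
 * (sketch; offsets are relative to %r8 == sctx->u in the asm). */
struct poly1305_simd_desc_ctx_avx2 {
	struct poly1305_desc_ctx base;
	bool uset;	/* r^2 cached? */
	bool wset;	/* r^3 and r^4 cached? */
	u32 u[5];	/* r^2:  %r8 + 0x00 */
	u32 w[5];	/* r^3:  %r8 + 0x14 (sctx->u + 5)  */
	u32 y[5];	/* r^4:  %r8 + 0x28 (sctx->u + 10) */
};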

+ 37 - 3
crypto/Kconfig

@@ -48,6 +48,8 @@ config CRYPTO_AEAD
config CRYPTO_AEAD2
	tristate
	select CRYPTO_ALGAPI2
+	select CRYPTO_NULL2
+	select CRYPTO_RNG2

config CRYPTO_BLKCIPHER
	tristate
@@ -150,12 +152,16 @@ config CRYPTO_GF128MUL

config CRYPTO_NULL
	tristate "Null algorithms"
-	select CRYPTO_ALGAPI
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_HASH
+	select CRYPTO_NULL2
	help
	  These are 'Null' algorithms, used by IPsec, which do nothing.

+config CRYPTO_NULL2
+	tristate
+	select CRYPTO_ALGAPI2
+	select CRYPTO_BLKCIPHER2
+	select CRYPTO_HASH2
+
config CRYPTO_PCRYPT
	tristate "Parallel crypto engine"
	depends on SMP
@@ -200,6 +206,7 @@ config CRYPTO_AUTHENC
	select CRYPTO_BLKCIPHER
	select CRYPTO_MANAGER
	select CRYPTO_HASH
+	select CRYPTO_NULL
	help
	  Authenc: Combined mode wrapper for IPsec.
	  This is required for IPSec.
@@ -470,6 +477,18 @@ config CRYPTO_POLY1305
	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
	  in IETF protocols. This is the portable C implementation of Poly1305.

+config CRYPTO_POLY1305_X86_64
+	tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)"
+	depends on X86 && 64BIT
+	select CRYPTO_POLY1305
+	help
+	  Poly1305 authenticator algorithm, RFC7539.
+
+	  Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
+	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
+	  in IETF protocols. This is the x86_64 assembler implementation using SIMD
+	  instructions.
+
config CRYPTO_MD4
	tristate "MD4 digest algorithm"
	select CRYPTO_HASH
@@ -1213,6 +1232,21 @@ config CRYPTO_CHACHA20
	  See also:
	  <http://cr.yp.to/chacha/chacha-20080128.pdf>

+config CRYPTO_CHACHA20_X86_64
+	tristate "ChaCha20 cipher algorithm (x86_64/SSSE3/AVX2)"
+	depends on X86 && 64BIT
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_CHACHA20
+	help
+	  ChaCha20 cipher algorithm, RFC7539.
+
+	  ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
+	  Bernstein and further specified in RFC7539 for use in IETF protocols.
+	  This is the x86_64 assembler implementation using SIMD instructions.
+
+	  See also:
+	  <http://cr.yp.to/chacha/chacha-20080128.pdf>
+
config CRYPTO_SEED
	tristate "SEED cipher algorithm"
	select CRYPTO_ALGAPI

+ 2 - 1
crypto/Makefile

@@ -17,6 +17,7 @@ obj-$(CONFIG_CRYPTO_AEAD2) += aead.o

crypto_blkcipher-y := ablkcipher.o
crypto_blkcipher-y += blkcipher.o
+crypto_blkcipher-y += skcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
@@ -46,7 +47,7 @@ obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
-obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
+obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o

+ 57 - 578
crypto/aead.c

@@ -3,7 +3,7 @@
 *
 * This file provides API support for AEAD algorithms.
 *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
@@ -13,13 +13,14 @@
 */

#include <crypto/internal/geniv.h>
+#include <crypto/internal/rng.h>
+#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
@@ -27,17 +28,6 @@

#include "internal.h"

-struct compat_request_ctx {
-	struct scatterlist src[2];
-	struct scatterlist dst[2];
-	struct scatterlist ivbuf[2];
-	struct scatterlist *ivsg;
-	struct aead_givcrypt_request subreq;
-};
-
-static int aead_null_givencrypt(struct aead_givcrypt_request *req);
-static int aead_null_givdecrypt(struct aead_givcrypt_request *req);
-
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
@@ -53,7 +43,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
-	ret = tfm->setkey(tfm, alignbuffer, keylen);
+	ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
@@ -64,12 +54,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
{
	unsigned long alignmask = crypto_aead_alignmask(tfm);

-	tfm = tfm->child;
-
	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

-	return tfm->setkey(tfm, key, keylen);
+	return crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_aead_setkey);

@@ -80,100 +68,17 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
	if (authsize > crypto_aead_maxauthsize(tfm))
		return -EINVAL;

-	if (tfm->setauthsize) {
-		err = tfm->setauthsize(tfm->child, authsize);
+	if (crypto_aead_alg(tfm)->setauthsize) {
+		err = crypto_aead_alg(tfm)->setauthsize(tfm, authsize);
		if (err)
			return err;
	}

-	tfm->child->authsize = authsize;
	tfm->authsize = authsize;
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);

-struct aead_old_request {
-	struct scatterlist srcbuf[2];
-	struct scatterlist dstbuf[2];
-	struct aead_request subreq;
-};
-
-unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
-{
-	return tfm->reqsize + sizeof(struct aead_old_request);
-}
-EXPORT_SYMBOL_GPL(crypto_aead_reqsize);
-
-static int old_crypt(struct aead_request *req,
-		     int (*crypt)(struct aead_request *req))
-{
-	struct aead_old_request *nreq = aead_request_ctx(req);
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct scatterlist *src, *dst;
-
-	if (req->old)
-		return crypt(req);
-
-	src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen);
-	dst = req->src == req->dst ?
-	      src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen);
-
-	aead_request_set_tfm(&nreq->subreq, aead);
-	aead_request_set_callback(&nreq->subreq, aead_request_flags(req),
-				  req->base.complete, req->base.data);
-	aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen,
-			       req->iv);
-	aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen);
-
-	return crypt(&nreq->subreq);
-}
-
-static int old_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct old_aead_alg *alg = crypto_old_aead_alg(aead);
-
-	return old_crypt(req, alg->encrypt);
-}
-
-static int old_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct old_aead_alg *alg = crypto_old_aead_alg(aead);
-
-	return old_crypt(req, alg->decrypt);
-}
-
-static int no_givcrypt(struct aead_givcrypt_request *req)
-{
-	return -ENOSYS;
-}
-
-static int crypto_old_aead_init_tfm(struct crypto_tfm *tfm)
-{
-	struct old_aead_alg *alg = &tfm->__crt_alg->cra_aead;
-	struct crypto_aead *crt = __crypto_aead_cast(tfm);
-
-	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
-		return -EINVAL;
-
-	crt->setkey = alg->setkey;
-	crt->setauthsize = alg->setauthsize;
-	crt->encrypt = old_encrypt;
-	crt->decrypt = old_decrypt;
-	if (alg->ivsize) {
-		crt->givencrypt = alg->givencrypt ?: no_givcrypt;
-		crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
-	} else {
-		crt->givencrypt = aead_null_givencrypt;
-		crt->givdecrypt = aead_null_givdecrypt;
-	}
-	crt->child = __crypto_aead_cast(tfm);
-	crt->authsize = alg->maxauthsize;
-
-	return 0;
-}
-
static void crypto_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_aead *aead = __crypto_aead_cast(tfm);
@@ -187,14 +92,6 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
	struct crypto_aead *aead = __crypto_aead_cast(tfm);
	struct aead_alg *alg = crypto_aead_alg(aead);

-	if (crypto_old_aead_alg(aead)->encrypt)
-		return crypto_old_aead_init_tfm(tfm);
-
-	aead->setkey = alg->setkey;
-	aead->setauthsize = alg->setauthsize;
-	aead->encrypt = alg->encrypt;
-	aead->decrypt = alg->decrypt;
-	aead->child = __crypto_aead_cast(tfm);
	aead->authsize = alg->maxauthsize;

	if (alg->exit)
@@ -206,64 +103,6 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
	return 0;
}

-#ifdef CONFIG_NET
-static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct crypto_report_aead raead;
-	struct old_aead_alg *aead = &alg->cra_aead;
-
-	strncpy(raead.type, "aead", sizeof(raead.type));
-	strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
-
-	raead.blocksize = alg->cra_blocksize;
-	raead.maxauthsize = aead->maxauthsize;
-	raead.ivsize = aead->ivsize;
-
-	if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
-		    sizeof(struct crypto_report_aead), &raead))
-		goto nla_put_failure;
-	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
-}
-#else
-static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	return -ENOSYS;
-}
-#endif
-
-static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
-	__attribute__ ((unused));
-static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
-{
-	struct old_aead_alg *aead = &alg->cra_aead;
-
-	seq_printf(m, "type         : aead\n");
-	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-					     "yes" : "no");
-	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
-	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
-	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
-	seq_printf(m, "geniv        : %s\n", aead->geniv ?: "<built-in>");
-}
-
-const struct crypto_type crypto_aead_type = {
-	.extsize = crypto_alg_extsize,
-	.init_tfm = crypto_aead_init_tfm,
-#ifdef CONFIG_PROC_FS
-	.show = crypto_old_aead_show,
-#endif
-	.report = crypto_old_aead_report,
-	.lookup = crypto_lookup_aead,
-	.maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
-	.maskset = CRYPTO_ALG_TYPE_MASK,
-	.type = CRYPTO_ALG_TYPE_AEAD,
-	.tfmsize = offsetof(struct crypto_aead, base),
-};
-EXPORT_SYMBOL_GPL(crypto_aead_type);
-
#ifdef CONFIG_NET
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -307,93 +146,31 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
	seq_printf(m, "geniv        : <none>\n");
}

-static const struct crypto_type crypto_new_aead_type = {
-	.extsize = crypto_alg_extsize,
-	.init_tfm = crypto_aead_init_tfm,
-#ifdef CONFIG_PROC_FS
-	.show = crypto_aead_show,
-#endif
-	.report = crypto_aead_report,
-	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
-	.maskset = CRYPTO_ALG_TYPE_MASK,
-	.type = CRYPTO_ALG_TYPE_AEAD,
-	.tfmsize = offsetof(struct crypto_aead, base),
-};
-
-static int aead_null_givencrypt(struct aead_givcrypt_request *req)
-{
-	return crypto_aead_encrypt(&req->areq);
-}
-
-static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
-{
-	return crypto_aead_decrypt(&req->areq);
-}
-
-#ifdef CONFIG_NET
-static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct crypto_report_aead raead;
-	struct old_aead_alg *aead = &alg->cra_aead;
-
-	strncpy(raead.type, "nivaead", sizeof(raead.type));
-	strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
-
-	raead.blocksize = alg->cra_blocksize;
-	raead.maxauthsize = aead->maxauthsize;
-	raead.ivsize = aead->ivsize;
-
-	if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
-		    sizeof(struct crypto_report_aead), &raead))
-		goto nla_put_failure;
-	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
-}
-#else
-static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
+static void crypto_aead_free_instance(struct crypto_instance *inst)
 {
{
-	return -ENOSYS;
-}
-#endif
-
+	struct aead_instance *aead = aead_instance(inst);

-	__attribute__ ((unused));
-static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
-{
-	struct old_aead_alg *aead = &alg->cra_aead;
+	if (!aead->free) {
+		inst->tmpl->free(inst);
+		return;
+	}
 
 
-	seq_printf(m, "type         : nivaead\n");
-	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-					     "yes" : "no");
-	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
-	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
-	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
-	seq_printf(m, "geniv        : %s\n", aead->geniv);
+	aead->free(aead);
 }
 }
 
 
-const struct crypto_type crypto_nivaead_type = {
+static const struct crypto_type crypto_aead_type = {
 	.extsize = crypto_alg_extsize,
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_aead_init_tfm,
 	.init_tfm = crypto_aead_init_tfm,
+	.free = crypto_aead_free_instance,
 #ifdef CONFIG_PROC_FS
 #ifdef CONFIG_PROC_FS
-	.show = crypto_nivaead_show,
+	.show = crypto_aead_show,
 #endif
 #endif
-	.report = crypto_nivaead_report,
-	.maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
-	.maskset = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV,
+	.report = crypto_aead_report,
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_MASK,
 	.type = CRYPTO_ALG_TYPE_AEAD,
 	.type = CRYPTO_ALG_TYPE_AEAD,
 	.tfmsize = offsetof(struct crypto_aead, base),
 	.tfmsize = offsetof(struct crypto_aead, base),
 };
 };
-EXPORT_SYMBOL_GPL(crypto_nivaead_type);
-
-static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
-			       const char *name, u32 type, u32 mask)
-{
-	spawn->base.frontend = &crypto_nivaead_type;
-	return crypto_grab_spawn(&spawn->base, name, type, mask);
-}
 
 
 static int aead_geniv_setkey(struct crypto_aead *tfm,
 static int aead_geniv_setkey(struct crypto_aead *tfm,
 			     const u8 *key, unsigned int keylen)
 			     const u8 *key, unsigned int keylen)
@@ -411,169 +188,6 @@ static int aead_geniv_setauthsize(struct crypto_aead *tfm,
 	return crypto_aead_setauthsize(ctx->child, authsize);
 	return crypto_aead_setauthsize(ctx->child, authsize);
 }
 }
 
 
-static void compat_encrypt_complete2(struct aead_request *req, int err)
-{
-	struct compat_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_givcrypt_request *subreq = &rctx->subreq;
-	struct crypto_aead *geniv;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = crypto_aead_reqtfm(req);
-	scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
-				 crypto_aead_ivsize(geniv), 1);
-
-out:
-	kzfree(subreq->giv);
-}
-
-static void compat_encrypt_complete(struct crypto_async_request *base, int err)
-{
-	struct aead_request *req = base->data;
-
-	compat_encrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
-static int compat_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-	struct compat_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_givcrypt_request *subreq = &rctx->subreq;
-	unsigned int ivsize = crypto_aead_ivsize(geniv);
-	struct scatterlist *src, *dst;
-	crypto_completion_t compl;
-	void *data;
-	u8 *info;
-	__be64 seq;
-	int err;
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	compl = req->base.complete;
-	data = req->base.data;
-
-	rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
-	info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);
-
-	if (!info) {
-		info = kmalloc(ivsize, req->base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		compl = compat_encrypt_complete;
-		data = req;
-	}
-
-	memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));
-
-	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
-	dst = req->src == req->dst ?
-	      src : scatterwalk_ffwd(rctx->dst, rctx->ivsg, ivsize);
-
-	aead_givcrypt_set_tfm(subreq, ctx->child);
-	aead_givcrypt_set_callback(subreq, req->base.flags,
-				   req->base.complete, req->base.data);
-	aead_givcrypt_set_crypt(subreq, src, dst,
-				req->cryptlen - ivsize, req->iv);
-	aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
-	aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));
-
-	err = crypto_aead_givencrypt(subreq);
-	if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
-		compat_encrypt_complete2(req, err);
-	return err;
-}
-
-static int compat_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-	struct compat_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_request *subreq = &rctx->subreq.areq;
-	unsigned int ivsize = crypto_aead_ivsize(geniv);
-	struct scatterlist *src, *dst;
-	crypto_completion_t compl;
-	void *data;
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	aead_request_set_tfm(subreq, ctx->child);
-
-	compl = req->base.complete;
-	data = req->base.data;
-
-	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
-	dst = req->src == req->dst ?
-	      src : scatterwalk_ffwd(rctx->dst, req->dst,
-				     req->assoclen + ivsize);
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
-	aead_request_set_crypt(subreq, src, dst,
-			       req->cryptlen - ivsize, req->iv);
-	aead_request_set_assoc(subreq, req->src, req->assoclen);
-
-	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-
-	return crypto_aead_decrypt(subreq);
-}
-
-static int compat_encrypt_first(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->lock);
-	if (geniv->encrypt != compat_encrypt_first)
-		goto unlock;
-
-	geniv->encrypt = compat_encrypt;
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
-
-	if (err)
-		return err;
-
-	return compat_encrypt(req);
-}
-
-static int aead_geniv_init_compat(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->lock);
-
-	crypto_aead_set_reqsize(geniv, sizeof(struct compat_request_ctx));
-
-	err = aead_geniv_init(tfm);
-
-	ctx->child = geniv->child;
-	geniv->child = geniv;
-
-	return err;
-}
-
-static void aead_geniv_exit_compat(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-
-	crypto_free_aead(ctx->child);
-}
-
 struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
 				       struct rtattr **tb, u32 type, u32 mask)
 {
@@ -590,8 +204,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
 	if (IS_ERR(algt))
 		return ERR_CAST(algt);
 
-	if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
-	    algt->mask)
+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
 		return ERR_PTR(-EINVAL);
 
 	name = crypto_attr_alg_name(tb[1]);
@@ -608,9 +221,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
 	mask |= crypto_requires_sync(algt->type, algt->mask);
 
 	crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
-	err = (algt->mask & CRYPTO_ALG_GENIV) ?
-	      crypto_grab_nivaead(spawn, name, type, mask) :
-	      crypto_grab_aead(spawn, name, type, mask);
+	err = crypto_grab_aead(spawn, name, type, mask);
 	if (err)
 		goto err_free_inst;
 
@@ -623,43 +234,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
 	if (ivsize < sizeof(u64))
 		goto err_drop_alg;
 
-	/*
-	 * This is only true if we're constructing an algorithm with its
-	 * default IV generator.  For the default generator we elide the
-	 * template name and double-check the IV generator.
-	 */
-	if (algt->mask & CRYPTO_ALG_GENIV) {
-		if (!alg->base.cra_aead.encrypt)
-			goto err_drop_alg;
-		if (strcmp(tmpl->name, alg->base.cra_aead.geniv))
-			goto err_drop_alg;
-
-		memcpy(inst->alg.base.cra_name, alg->base.cra_name,
-		       CRYPTO_MAX_ALG_NAME);
-		memcpy(inst->alg.base.cra_driver_name,
-		       alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME);
-
-		inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					   CRYPTO_ALG_GENIV;
-		inst->alg.base.cra_flags |= alg->base.cra_flags &
-					    CRYPTO_ALG_ASYNC;
-		inst->alg.base.cra_priority = alg->base.cra_priority;
-		inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
-		inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
-		inst->alg.base.cra_type = &crypto_aead_type;
-
-		inst->alg.base.cra_aead.ivsize = ivsize;
-		inst->alg.base.cra_aead.maxauthsize = maxauthsize;
-
-		inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey;
-		inst->alg.base.cra_aead.setauthsize =
-			alg->base.cra_aead.setauthsize;
-		inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt;
-		inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt;
-
-		goto out;
-	}
-
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s)", tmpl->name, alg->base.cra_name) >=
@@ -682,12 +256,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
 	inst->alg.ivsize = ivsize;
 	inst->alg.maxauthsize = maxauthsize;
 
-	inst->alg.encrypt = compat_encrypt_first;
-	inst->alg.decrypt = compat_decrypt;
-
-	inst->alg.base.cra_init = aead_geniv_init_compat;
-	inst->alg.base.cra_exit = aead_geniv_exit_compat;
-
 out:
 	return inst;
 
@@ -707,147 +275,58 @@ void aead_geniv_free(struct aead_instance *inst)
 }
 EXPORT_SYMBOL_GPL(aead_geniv_free);
 
-int aead_geniv_init(struct crypto_tfm *tfm)
+int aead_init_geniv(struct crypto_aead *aead)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
+	struct aead_instance *inst = aead_alg_instance(aead);
 	struct crypto_aead *child;
-	struct crypto_aead *aead;
-
-	aead = __crypto_aead_cast(tfm);
-
-	child = crypto_spawn_aead(crypto_instance_ctx(inst));
-	if (IS_ERR(child))
-		return PTR_ERR(child);
-
-	aead->child = child;
-	aead->reqsize += crypto_aead_reqsize(child);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(aead_geniv_init);
-
-void aead_geniv_exit(struct crypto_tfm *tfm)
-{
-	crypto_free_aead(__crypto_aead_cast(tfm)->child);
-}
-EXPORT_SYMBOL_GPL(aead_geniv_exit);
-
-static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
-{
-	struct rtattr *tb[3];
-	struct {
-		struct rtattr attr;
-		struct crypto_attr_type data;
-	} ptype;
-	struct {
-		struct rtattr attr;
-		struct crypto_attr_alg data;
-	} palg;
-	struct crypto_template *tmpl;
-	struct crypto_instance *inst;
-	struct crypto_alg *larval;
-	const char *geniv;
 	int err;
 
-	larval = crypto_larval_lookup(alg->cra_driver_name,
-				      CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
-				      CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	err = PTR_ERR(larval);
-	if (IS_ERR(larval))
-		goto out;
-
-	err = -EAGAIN;
-	if (!crypto_is_larval(larval))
-		goto drop_larval;
-
-	ptype.attr.rta_len = sizeof(ptype);
-	ptype.attr.rta_type = CRYPTOA_TYPE;
-	ptype.data.type = type | CRYPTO_ALG_GENIV;
-	/* GENIV tells the template that we're making a default geniv. */
-	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
-	tb[0] = &ptype.attr;
-
-	palg.attr.rta_len = sizeof(palg);
-	palg.attr.rta_type = CRYPTOA_ALG;
-	/* Must use the exact name to locate ourselves. */
-	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
-	tb[1] = &palg.attr;
-
-	tb[2] = NULL;
+	spin_lock_init(&ctx->lock);
 
-	geniv = alg->cra_aead.geniv;
+	err = crypto_get_default_rng();
+	if (err)
+		goto out;
 
-	tmpl = crypto_lookup_template(geniv);
-	err = -ENOENT;
-	if (!tmpl)
-		goto kill_larval;
+	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+				   crypto_aead_ivsize(aead));
+	crypto_put_default_rng();
+	if (err)
+		goto out;
 
-	if (tmpl->create) {
-		err = tmpl->create(tmpl, tb);
-		if (err)
-			goto put_tmpl;
-		goto ok;
-	}
+	ctx->null = crypto_get_default_null_skcipher();
+	err = PTR_ERR(ctx->null);
+	if (IS_ERR(ctx->null))
+		goto out;
 
-	inst = tmpl->alloc(tb);
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto put_tmpl;
+	child = crypto_spawn_aead(aead_instance_ctx(inst));
+	err = PTR_ERR(child);
+	if (IS_ERR(child))
+		goto drop_null;
 
-	err = crypto_register_instance(tmpl, inst);
-	if (err) {
-		tmpl->free(inst);
-		goto put_tmpl;
-	}
+	ctx->child = child;
+	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
+				      sizeof(struct aead_request));
 
-ok:
-	/* Redo the lookup to use the instance we just registered. */
-	err = -EAGAIN;
+	err = 0;
 
-put_tmpl:
-	crypto_tmpl_put(tmpl);
-kill_larval:
-	crypto_larval_kill(larval);
-drop_larval:
-	crypto_mod_put(larval);
 out:
-	crypto_mod_put(alg);
 	return err;
+
+drop_null:
+	crypto_put_default_null_skcipher();
+	goto out;
 }
+EXPORT_SYMBOL_GPL(aead_init_geniv);
 
-struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
+void aead_exit_geniv(struct crypto_aead *tfm)
 {
-	struct crypto_alg *alg;
-
-	alg = crypto_alg_mod_lookup(name, type, mask);
-	if (IS_ERR(alg))
-		return alg;
-
-	if (alg->cra_type == &crypto_aead_type)
-		return alg;
-
-	if (!alg->cra_aead.ivsize)
-		return alg;
-
-	crypto_mod_put(alg);
-	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
-				    mask & ~CRYPTO_ALG_TESTED);
-	if (IS_ERR(alg))
-		return alg;
-
-	if (alg->cra_type == &crypto_aead_type) {
-		if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
-			crypto_mod_put(alg);
-			alg = ERR_PTR(-ENOENT);
-		}
-		return alg;
-	}
-
-	BUG_ON(!alg->cra_aead.ivsize);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
 
-	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
+	crypto_free_aead(ctx->child);
+	crypto_put_default_null_skcipher();
 }
-EXPORT_SYMBOL_GPL(crypto_lookup_aead);
+EXPORT_SYMBOL_GPL(aead_exit_geniv);
 
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
 		     u32 type, u32 mask)
@@ -870,7 +349,7 @@ static int aead_prepare_alg(struct aead_alg *alg)
 	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
 		return -EINVAL;
 
-	base->cra_type = &crypto_new_aead_type;
+	base->cra_type = &crypto_aead_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
 

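[Editorial note] The crypto/aead.c hunks above retire the nivaead/compat IV-generator plumbing in favour of the shared helpers aead_init_geniv() and aead_exit_geniv(). A minimal sketch of how an IV-generator template is expected to hook them up after this change -- illustrative only; example_geniv_create is a made-up name, while aead_geniv_alloc() and the two helpers are the ones exported above:

```c
#include <crypto/internal/geniv.h>

/* Hypothetical template constructor; seqiv/echainiv follow this shape. */
static int example_geniv_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);	/* wrap the child AEAD */
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* aead_init_geniv() seeds ctx->salt from the DRNG and grabs the
	 * default null skcipher; aead_exit_geniv() releases both. */
	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;
	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);

	/* .encrypt/.decrypt are supplied by the real template; elided here. */
	return aead_register_instance(tmpl, inst);
}
```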
crypto/algapi.c (+14 -11)

@@ -67,12 +67,22 @@ static int crypto_check_alg(struct crypto_alg *alg)
 	return crypto_set_driver_name(alg);
 }
 
+static void crypto_free_instance(struct crypto_instance *inst)
+{
+	if (!inst->alg.cra_type->free) {
+		inst->tmpl->free(inst);
+		return;
+	}
+
+	inst->alg.cra_type->free(inst);
+}
+
 static void crypto_destroy_instance(struct crypto_alg *alg)
 {
 	struct crypto_instance *inst = (void *)alg;
 	struct crypto_template *tmpl = inst->tmpl;
 
-	tmpl->free(inst);
+	crypto_free_instance(inst);
 	crypto_tmpl_put(tmpl);
 }
 
@@ -481,7 +491,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 
 	hlist_for_each_entry_safe(inst, n, list, list) {
 		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
-		tmpl->free(inst);
+		crypto_free_instance(inst);
 	}
 	crypto_remove_final(&users);
 }
@@ -892,7 +902,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(crypto_enqueue_request);
 
-void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
 {
 	struct list_head *request;
 
@@ -907,14 +917,7 @@ void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
 	request = queue->list.next;
 	list_del(request);
 
-	return (char *)list_entry(request, struct crypto_async_request, list) -
-	       offset;
-}
-EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
-
-struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
-{
-	return __crypto_dequeue_request(queue, 0);
+	return list_entry(request, struct crypto_async_request, list);
 }
 EXPORT_SYMBOL_GPL(crypto_dequeue_request);
 

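[Editorial note] With __crypto_dequeue_request() folded away, crypto_dequeue_request() is the single dequeue entry point again. For context, a hedged sketch of the usual driver-side queue pump built on this API (example_pump_queue is invented; crypto_get_backlog() is the existing inline helper):

```c
#include <crypto/algapi.h>

static void example_pump_queue(struct crypto_queue *queue)
{
	struct crypto_async_request *req, *backlog;

	backlog = crypto_get_backlog(queue);
	req = crypto_dequeue_request(queue);	/* base request or NULL */
	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	/* A real driver now casts req back to its own request type
	 * (e.g. with ablkcipher_request_cast(req)) and processes it. */
}
```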
crypto/algboss.c (+5 -7)

@@ -248,13 +248,11 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 	type = alg->cra_flags;
 
 	/* This piece of crap needs to disappear into per-type test hooks. */
-	if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
-	       CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
-	     ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-					  alg->cra_ablkcipher.ivsize)) ||
-	    (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
-	     alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
+	if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
+	      CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
+	    ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+					 alg->cra_ablkcipher.ivsize))
 		type |= CRYPTO_ALG_TESTED;
 
 	param->type = type;

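[Editorial note] The cryptomgr hunk drops the nivaead branch, so only IV-generating blkciphers still have their self-test deferred. The condition relies on the usual type-matching idiom, sketched below with an invented helper name:

```c
#include <linux/crypto.h>

/* "(type ^ wanted) & mask == 0" -- true if the type bits selected by
 * @mask agree with @wanted, exactly as in the test above. */
static inline bool example_type_matches(u32 type, u32 wanted, u32 mask)
{
	return ((type ^ wanted) & mask) == 0;
}

/* e.g.: example_type_matches(alg->cra_flags, CRYPTO_ALG_TYPE_BLKCIPHER,
 *			      CRYPTO_ALG_TYPE_BLKCIPHER_MASK) */
```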
crypto/algif_aead.c (+2 -2)

@@ -90,6 +90,7 @@ static void aead_put_sgl(struct sock *sk)
 		put_page(sg_page(sg + i));
 		sg_assign_page(sg + i, NULL);
 	}
+	sg_init_table(sg, ALG_MAX_PAGES);
 	sgl->cur = 0;
 	ctx->used = 0;
 	ctx->more = 0;
@@ -514,8 +515,7 @@ static struct proto_ops algif_aead_ops = {
 
 static void *aead_bind(const char *name, u32 type, u32 mask)
 {
-	return crypto_alloc_aead(name, type | CRYPTO_ALG_AEAD_NEW,
-				 mask | CRYPTO_ALG_AEAD_NEW);
+	return crypto_alloc_aead(name, type, mask);
 }
 
 static void aead_release(void *private)

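[Editorial note] After the AEAD conversion, aead_bind() forwards the caller's type/mask untouched, so any registered AEAD binds directly. For reference, a hedged userspace sketch of the standard AF_ALG binding this path serves (not code from this diff):

```c
#include <string.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int example_bind_aead(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",		/* routes to aead_bind() */
		.salg_name   = "gcm(aes)",	/* any registered AEAD */
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return -1;

	return tfmfd;	/* accept() on it yields operation sockets */
}
```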
crypto/authenc.c (+180 -400)

@@ -1,7 +1,7 @@
 /*
  * Authenc: Simple AEAD wrapper for IPsec
  *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -14,6 +14,7 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/authenc.h>
+#include <crypto/null.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -23,26 +24,21 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
-typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
-
 struct authenc_instance_ctx {
 	struct crypto_ahash_spawn auth;
 	struct crypto_skcipher_spawn enc;
+	unsigned int reqoff;
 };
 
 struct crypto_authenc_ctx {
-	unsigned int reqoff;
 	struct crypto_ahash *auth;
 	struct crypto_ablkcipher *enc;
+	struct crypto_blkcipher *null;
 };
 
 struct authenc_request_ctx {
-	unsigned int cryptlen;
-	struct scatterlist *sg;
-	struct scatterlist asg[2];
-	struct scatterlist cipher[2];
-	crypto_completion_t complete;
-	crypto_completion_t update_complete;
+	struct scatterlist src[2];
+	struct scatterlist dst[2];
 	char tail[];
 };
 
@@ -119,189 +115,35 @@ badkey:
 	goto out;
 }
 
-static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
-					    int err)
-{
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
-				 crypto_aead_authsize(authenc), 1);
-
-out:
-	authenc_request_complete(req, err);
-}
-
 static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct aead_instance *inst = aead_alg_instance(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
 
 	if (err)
 		goto out;
 
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
+	scatterwalk_map_and_copy(ahreq->result, req->dst,
+				 req->assoclen + req->cryptlen,
 				 crypto_aead_authsize(authenc), 1);
 
 out:
 	aead_request_complete(req, err);
 }
 
-static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
-					     int err)
-{
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-	authenc_request_complete(req, err);
-}
-
-static void authenc_verify_ahash_done(struct crypto_async_request *areq,
-				      int err)
-{
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-	authenc_request_complete(req, err);
-}
-
-static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
-{
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct crypto_ahash *auth = ctx->auth;
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	u8 *hash = areq_ctx->tail;
-	int err;
-
-	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1);
-
-	ahash_request_set_tfm(ahreq, auth);
-
-	err = crypto_ahash_init(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->update_complete, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	return hash;
-}
-
-static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
+static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct aead_instance *inst = aead_alg_instance(authenc);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct crypto_ahash *auth = ctx->auth;
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
 	u8 *hash = areq_ctx->tail;
 	int err;
 
@@ -309,66 +151,18 @@ static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
 			   crypto_ahash_alignmask(auth) + 1);
 
 	ahash_request_set_tfm(ahreq, auth);
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->complete, req);
+	ahash_request_set_crypt(ahreq, req->dst, hash,
+				req->assoclen + req->cryptlen);
+	ahash_request_set_callback(ahreq, flags,
+				   authenc_geniv_ahash_done, req);
 
 	err = crypto_ahash_digest(ahreq);
 	if (err)
-		return ERR_PTR(err);
-
-	return hash;
-}
-
-static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
-				 unsigned int flags)
-{
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct scatterlist *dst = req->dst;
-	struct scatterlist *assoc = req->assoc;
-	struct scatterlist *cipher = areq_ctx->cipher;
-	struct scatterlist *asg = areq_ctx->asg;
-	unsigned int ivsize = crypto_aead_ivsize(authenc);
-	unsigned int cryptlen = req->cryptlen;
-	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
-	struct page *dstp;
-	u8 *vdst;
-	u8 *hash;
-
-	dstp = sg_page(dst);
-	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
-
-	if (ivsize) {
-		sg_init_table(cipher, 2);
-		sg_set_buf(cipher, iv, ivsize);
-		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
-		dst = cipher;
-		cryptlen += ivsize;
-	}
-
-	if (req->assoclen && sg_is_last(assoc)) {
-		authenc_ahash_fn = crypto_authenc_ahash;
-		sg_init_table(asg, 2);
-		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		scatterwalk_crypto_chain(asg, dst, 0, 2);
-		dst = asg;
-		cryptlen += req->assoclen;
-	}
-
-	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->sg = dst;
-
-	areq_ctx->complete = authenc_geniv_ahash_done;
-	areq_ctx->update_complete = authenc_geniv_ahash_update_done;
-
-	hash = authenc_ahash_fn(req, flags);
-	if (IS_ERR(hash))
-		return PTR_ERR(hash);
+		return err;
 
-	scatterwalk_map_and_copy(hash, dst, cryptlen,
+	scatterwalk_map_and_copy(hash, req->dst, req->assoclen + req->cryptlen,
 				 crypto_aead_authsize(authenc), 1);
+
 	return 0;
 }
 
@@ -377,180 +171,155 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
 {
 	struct aead_request *areq = req->data;
 
-	if (!err) {
-		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-		struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
-		struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-							    + ctx->reqoff);
-		u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
+	if (err)
+		goto out;
 
-		err = crypto_authenc_genicv(areq, iv, 0);
-	}
+	err = crypto_authenc_genicv(areq, 0);
 
+out:
 	authenc_request_complete(areq, err);
 }
 
+static int crypto_authenc_copy_assoc(struct aead_request *req)
+{
+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct blkcipher_desc desc = {
+		.tfm = ctx->null,
+	};
+
+	return crypto_blkcipher_encrypt(&desc, req->dst, req->src,
+					req->assoclen);
+}
+
 static int crypto_authenc_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct aead_instance *inst = aead_alg_instance(authenc);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_ablkcipher *enc = ctx->enc;
-	struct scatterlist *dst = req->dst;
 	unsigned int cryptlen = req->cryptlen;
-	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-						    + ctx->reqoff);
-	u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
+	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
+						    ictx->reqoff);
+	struct scatterlist *src, *dst;
 	int err;
 
+	sg_init_table(areq_ctx->src, 2);
+	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+	dst = src;
+
+	if (req->src != req->dst) {
+		err = crypto_authenc_copy_assoc(req);
+		if (err)
+			return err;
+
+		sg_init_table(areq_ctx->dst, 2);
+		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+	}
+
 	ablkcipher_request_set_tfm(abreq, enc);
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 					crypto_authenc_encrypt_done, req);
-	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
-
-	memcpy(iv, req->iv, crypto_aead_ivsize(authenc));
+	ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
 
 	err = crypto_ablkcipher_encrypt(abreq);
 	if (err)
 		return err;
 
-	return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+	return crypto_authenc_genicv(req, aead_request_flags(req));
 }
 
-static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
-					   int err)
+static int crypto_authenc_decrypt_tail(struct aead_request *req,
+				       unsigned int flags)
 {
-	struct aead_request *areq = req->data;
-
-	if (!err) {
-		struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-
-		err = crypto_authenc_genicv(areq, greq->giv, 0);
-	}
+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct aead_instance *inst = aead_alg_instance(authenc);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
+						    ictx->reqoff);
+	unsigned int authsize = crypto_aead_authsize(authenc);
+	u8 *ihash = ahreq->result + authsize;
+	struct scatterlist *src, *dst;
 
-	authenc_request_complete(areq, err);
-}
+	scatterwalk_map_and_copy(ihash, req->src, ahreq->nbytes, authsize, 0);
 
-static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *authenc = aead_givcrypt_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct aead_request *areq = &req->areq;
-	struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-	u8 *iv = req->giv;
-	int err;
+	if (crypto_memneq(ihash, ahreq->result, authsize))
+		return -EBADMSG;
 
-	skcipher_givcrypt_set_tfm(greq, ctx->enc);
-	skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
-				       crypto_authenc_givencrypt_done, areq);
-	skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
-				    areq->iv);
-	skcipher_givcrypt_set_giv(greq, iv, req->seq);
+	sg_init_table(areq_ctx->src, 2);
+	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+	dst = src;
 
-	err = crypto_skcipher_givencrypt(greq);
-	if (err)
-		return err;
+	if (req->src != req->dst) {
+		sg_init_table(areq_ctx->dst, 2);
+		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+	}
 
-	return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
-}
+	ablkcipher_request_set_tfm(abreq, ctx->enc);
+	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+					req->base.complete, req->base.data);
+	ablkcipher_request_set_crypt(abreq, src, dst,
+				     req->cryptlen - authsize, req->iv);
 
-static int crypto_authenc_verify(struct aead_request *req,
-				 authenc_ahash_t authenc_ahash_fn)
-{
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	u8 *ohash;
-	u8 *ihash;
-	unsigned int authsize;
-
-	areq_ctx->complete = authenc_verify_ahash_done;
-	areq_ctx->update_complete = authenc_verify_ahash_update_done;
-
-	ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (IS_ERR(ohash))
-		return PTR_ERR(ohash);
-
-	authsize = crypto_aead_authsize(authenc);
-	ihash = ohash + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
+	return crypto_ablkcipher_decrypt(abreq);
 }
 }
 
-				  unsigned int cryptlen)
+static void authenc_verify_ahash_done(struct crypto_async_request *areq,
+				      int err)
 {
 {
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct scatterlist *src = req->src;
-	struct scatterlist *assoc = req->assoc;
-	struct scatterlist *cipher = areq_ctx->cipher;
-	struct scatterlist *asg = areq_ctx->asg;
-	unsigned int ivsize = crypto_aead_ivsize(authenc);
-	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
-	struct page *srcp;
-	u8 *vsrc;
-
-	srcp = sg_page(src);
-	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
-
-	if (ivsize) {
-		sg_init_table(cipher, 2);
-		sg_set_buf(cipher, iv, ivsize);
-		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
-		src = cipher;
-		cryptlen += ivsize;
-	}
+	struct aead_request *req = areq->data;
 
-	if (req->assoclen && sg_is_last(assoc)) {
-		authenc_ahash_fn = crypto_authenc_ahash;
-		sg_init_table(asg, 2);
-		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		scatterwalk_crypto_chain(asg, src, 0, 2);
-		src = asg;
-		cryptlen += req->assoclen;
-	}
+	if (err)
+		goto out;
 
-	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->sg = src;
+	err = crypto_authenc_decrypt_tail(req, 0);
 
-	return crypto_authenc_verify(req, authenc_ahash_fn);
+out:
+	authenc_request_complete(req, err);
 }
 
 static int crypto_authenc_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct ablkcipher_request *abreq = aead_request_ctx(req);
-	unsigned int cryptlen = req->cryptlen;
 	unsigned int authsize = crypto_aead_authsize(authenc);
-	u8 *iv = req->iv;
+	struct aead_instance *inst = aead_alg_instance(authenc);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_ahash *auth = ctx->auth;
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+	u8 *hash = areq_ctx->tail;
 	int err;
 
-	if (cryptlen < authsize)
-		return -EINVAL;
-	cryptlen -= authsize;
+	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+			   crypto_ahash_alignmask(auth) + 1);
 
-	err = crypto_authenc_iverify(req, iv, cryptlen);
+	ahash_request_set_tfm(ahreq, auth);
+	ahash_request_set_crypt(ahreq, req->src, hash,
+				req->assoclen + req->cryptlen - authsize);
+	ahash_request_set_callback(ahreq, aead_request_flags(req),
+				   authenc_verify_ahash_done, req);
+
+	err = crypto_ahash_digest(ahreq);
 	if (err)
 		return err;
 
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
-
-	return crypto_ablkcipher_decrypt(abreq);
+	return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
 }
 
-static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ahash *auth;
 	struct crypto_ablkcipher *enc;
+	struct crypto_blkcipher *null;
 	int err;
 
 	auth = crypto_spawn_ahash(&ictx->auth);
@@ -562,42 +331,57 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
 	if (IS_ERR(enc))
 		goto err_free_ahash;
 
+	null = crypto_get_default_null_skcipher();
+	err = PTR_ERR(null);
+	if (IS_ERR(null))
+		goto err_free_skcipher;
+
 	ctx->auth = auth;
 	ctx->enc = enc;
+	ctx->null = null;
 
-	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
-			    crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1) +
-		      crypto_ablkcipher_ivsize(enc);
-
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+	crypto_aead_set_reqsize(
+		tfm,
 		sizeof(struct authenc_request_ctx) +
-		ctx->reqoff +
+		ictx->reqoff +
 		max_t(unsigned int,
-			crypto_ahash_reqsize(auth) +
-			sizeof(struct ahash_request),
-			sizeof(struct skcipher_givcrypt_request) +
-			crypto_ablkcipher_reqsize(enc)));
+		      crypto_ahash_reqsize(auth) +
+		      sizeof(struct ahash_request),
+		      sizeof(struct ablkcipher_request) +
+		      crypto_ablkcipher_reqsize(enc)));
 
 	return 0;
 
+err_free_skcipher:
+	crypto_free_ablkcipher(enc);
 err_free_ahash:
 	crypto_free_ahash(auth);
 	return err;
 }
 
-static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
 
 	crypto_free_ahash(ctx->auth);
 	crypto_free_ablkcipher(ctx->enc);
+	crypto_put_default_null_skcipher();
 }
 
-static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
+static void crypto_authenc_free(struct aead_instance *inst)
+{
+	struct authenc_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->enc);
+	crypto_drop_ahash(&ctx->auth);
+	kfree(inst);
+}
+
+static int crypto_authenc_create(struct crypto_template *tmpl,
+				 struct rtattr **tb)
 {
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct hash_alg_common *auth;
 	struct crypto_alg *auth_base;
 	struct crypto_alg *enc;
@@ -607,15 +391,15 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
 			       CRYPTO_ALG_TYPE_AHASH_MASK);
 	if (IS_ERR(auth))
-		return ERR_CAST(auth);
+		return PTR_ERR(auth);
 
 	auth_base = &auth->base;
 
@@ -629,13 +413,14 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
 	if (!inst)
 		goto out_put_auth;
 
-	ctx = crypto_instance_ctx(inst);
+	ctx = aead_instance_ctx(inst);
 
-	err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+	err = crypto_init_ahash_spawn(&ctx->auth, auth,
+				      aead_crypto_instance(inst));
 	if (err)
 		goto err_free_inst;
 
-	crypto_set_skcipher_spawn(&ctx->enc, inst);
+	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
@@ -644,41 +429,47 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
 
 	enc = crypto_skcipher_spawn_alg(&ctx->enc);
 
+	ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
+			    auth_base->cra_alignmask + 1);
+
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 		     "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;
 
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "authenc(%s,%s)", auth_base->cra_driver_name,
 		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = enc->cra_priority *
-				 10 + auth_base->cra_priority;
-	inst->alg.cra_blocksize = enc->cra_blocksize;
-	inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
-	inst->alg.cra_type = &crypto_aead_type;
+	inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = enc->cra_priority * 10 +
+				      auth_base->cra_priority;
+	inst->alg.base.cra_blocksize = enc->cra_blocksize;
+	inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
+				       enc->cra_alignmask;
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
+
+	inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+	inst->alg.maxauthsize = auth->digestsize;
 
-	inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
-	inst->alg.cra_aead.maxauthsize = auth->digestsize;
+	inst->alg.init = crypto_authenc_init_tfm;
+	inst->alg.exit = crypto_authenc_exit_tfm;
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
+	inst->alg.setkey = crypto_authenc_setkey;
+	inst->alg.encrypt = crypto_authenc_encrypt;
+	inst->alg.decrypt = crypto_authenc_decrypt;
 
-	inst->alg.cra_init = crypto_authenc_init_tfm;
-	inst->alg.cra_exit = crypto_authenc_exit_tfm;
+	inst->free = crypto_authenc_free;
 
-	inst->alg.cra_aead.setkey = crypto_authenc_setkey;
-	inst->alg.cra_aead.encrypt = crypto_authenc_encrypt;
-	inst->alg.cra_aead.decrypt = crypto_authenc_decrypt;
-	inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_enc;
 
 out:
 	crypto_mod_put(auth_base);
-	return inst;
+	return err;
 
 err_drop_enc:
 	crypto_drop_skcipher(&ctx->enc);
@@ -687,23 +478,12 @@ err_drop_auth:
 err_free_inst:
 	kfree(inst);
 out_put_auth:
-	inst = ERR_PTR(err);
 	goto out;
 }
 
-static void crypto_authenc_free(struct crypto_instance *inst)
-{
-	struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->enc);
-	crypto_drop_ahash(&ctx->auth);
-	kfree(inst);
-}
-
 static struct crypto_template crypto_authenc_tmpl = {
 	.name = "authenc",
-	.alloc = crypto_authenc_alloc,
-	.free = crypto_authenc_free,
+	.create = crypto_authenc_create,
 	.module = THIS_MODULE,
 };
 

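[Editorial note] The rewritten authenc digests the whole assoc||ciphertext region with a single crypto_ahash_digest() call and uses scatterwalk_ffwd() to step past the associated data before the cipher pass. A minimal sketch of that idiom under the new AEAD layout (example_skip_ad is invented; the scatterwalk_ffwd() call matches its use above):

```c
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

/* New-style AEAD requests carry AD || text in req->src.  Fast-forward
 * past @assoclen bytes so the cipher only sees the text region;
 * @sg is two entries of caller-provided storage, as in the request ctx. */
static struct scatterlist *example_skip_ad(struct scatterlist sg[2],
					   struct scatterlist *src,
					   unsigned int assoclen)
{
	sg_init_table(sg, 2);
	return scatterwalk_ffwd(sg, src, assoclen);
}
```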
crypto/authencesn.c (+219 -497)

@@ -4,6 +4,7 @@
  *
  * Copyright (C) 2010 secunet Security Networks AG
  * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -16,6 +17,7 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/authenc.h>
+#include <crypto/null.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -34,19 +36,12 @@ struct crypto_authenc_esn_ctx {
 	unsigned int reqoff;
 	struct crypto_ahash *auth;
 	struct crypto_ablkcipher *enc;
+	struct crypto_blkcipher *null;
 };
 
 struct authenc_esn_request_ctx {
-	unsigned int cryptlen;
-	unsigned int headlen;
-	unsigned int trailen;
-	struct scatterlist *sg;
-	struct scatterlist hsg[2];
-	struct scatterlist tsg[1];
-	struct scatterlist cipher[2];
-	crypto_completion_t complete;
-	crypto_completion_t update_complete;
-	crypto_completion_t update_complete2;
+	struct scatterlist src[2];
+	struct scatterlist dst[2];
 	char tail[];
 };
 
@@ -56,6 +51,15 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
 		aead_request_complete(req, err);
 }
 
+static int crypto_authenc_esn_setauthsize(struct crypto_aead *authenc_esn,
+					  unsigned int authsize)
+{
+	if (authsize > 0 && authsize < 4)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
 				     unsigned int keylen)
 {
@@ -93,536 +97,242 @@ badkey:
 	goto out;
 }
 
-static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
-						int err)
+static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
+					  unsigned int flags)
 {
 {
-	struct aead_request *req = areq->data;
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->update_complete2, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
-				 crypto_aead_authsize(authenc_esn), 1);
-
-out:
-	authenc_esn_request_complete(req, err);
-}
-
-static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
-						 int err)
-{
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
+	struct crypto_ahash *auth = ctx->auth;
+	u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
+			     crypto_ahash_alignmask(auth) + 1);
+	unsigned int authsize = crypto_aead_authsize(authenc_esn);
+	unsigned int assoclen = req->assoclen;
+	unsigned int cryptlen = req->cryptlen;
+	struct scatterlist *dst = req->dst;
+	u32 tmp[2];
 
 
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
-				 crypto_aead_authsize(authenc_esn), 1);
+	/* Move high-order bits of sequence number back. */
+	scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+	scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
 
-out:
-	authenc_esn_request_complete(req, err);
+	scatterwalk_map_and_copy(hash, dst, assoclen + cryptlen, authsize, 1);
+	return 0;
 }
 
-
 static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
 					 int err)
 {
 	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
 
-	if (err)
-		goto out;
-
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
-				 crypto_aead_authsize(authenc_esn), 1);
-
-out:
+	err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
 	aead_request_complete(req, err);
 }
 
-
-static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
-						 int err)
+static int crypto_authenc_esn_genicv(struct aead_request *req,
+				     unsigned int flags)
 {
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-				areq_ctx->cryptlen);
-
-	ahash_request_set_callback(ahreq,
-				   aead_request_flags(req) &
-				   CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->update_complete2, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc_esn);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-	authenc_esn_request_complete(req, err);
-}
-
-static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
-						  int err)
-{
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+	struct crypto_ahash *auth = ctx->auth;
+	u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
+			     crypto_ahash_alignmask(auth) + 1);
 	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	unsigned int authsize = crypto_aead_authsize(authenc_esn);
+	unsigned int assoclen = req->assoclen;
 	unsigned int cryptlen = req->cryptlen;
+	struct scatterlist *dst = req->dst;
+	u32 tmp[2];
 
-	if (err)
-		goto out;
+	if (!authsize)
+		return 0;
 
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
+	/* Move high-order bits of sequence number to the end. */
+	scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
+	scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
 
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
+	sg_init_table(areq_ctx->dst, 2);
+	dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
 
-	authsize = crypto_aead_authsize(authenc_esn);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
+	ahash_request_set_tfm(ahreq, auth);
+	ahash_request_set_crypt(ahreq, dst, hash, assoclen + cryptlen);
+	ahash_request_set_callback(ahreq, flags,
+				   authenc_esn_geniv_ahash_done, req);
 
 
-out:
-	authenc_esn_request_complete(req, err);
+	return crypto_ahash_digest(ahreq) ?:
+	       crypto_authenc_esn_genicv_tail(req, aead_request_flags(req));
 }
 }
 
 
-					  int err)
+static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
+					    int err)
 {
 {
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc_esn);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
+	struct aead_request *areq = req->data;
 
 
-	err = crypto_ablkcipher_decrypt(abreq);
+	if (!err)
+		err = crypto_authenc_esn_genicv(areq, 0);
 
 
-out:
-	authenc_esn_request_complete(req, err);
+	authenc_esn_request_complete(areq, err);
 }
 }
 
-				    unsigned int flags)
+static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
 {
 {
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct crypto_ahash *auth = ctx->auth;
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	u8 *hash = areq_ctx->tail;
-	int err;
+	struct blkcipher_desc desc = {
+		.tfm = ctx->null,
+	};
 
 
-	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1);
-
-	ahash_request_set_tfm(ahreq, auth);
-
-	err = crypto_ahash_init(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->update_complete, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->update_complete2, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	return hash;
+	return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len);
 }
 }
 
 
-static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
-				     unsigned int flags)
+static int crypto_authenc_esn_encrypt(struct aead_request *req)
 {
 {
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct scatterlist *dst = req->dst;
-	struct scatterlist *assoc = req->assoc;
-	struct scatterlist *cipher = areq_ctx->cipher;
-	struct scatterlist *hsg = areq_ctx->hsg;
-	struct scatterlist *tsg = areq_ctx->tsg;
-	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+						    + ctx->reqoff);
+	struct crypto_ablkcipher *enc = ctx->enc;
+	unsigned int assoclen = req->assoclen;
 	unsigned int cryptlen = req->cryptlen;
 	unsigned int cryptlen = req->cryptlen;
-	struct page *dstp;
-	u8 *vdst;
-	u8 *hash;
-
-	dstp = sg_page(dst);
-	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
-
-	if (ivsize) {
-		sg_init_table(cipher, 2);
-		sg_set_buf(cipher, iv, ivsize);
-		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
-		dst = cipher;
-		cryptlen += ivsize;
-	}
-
-	if (assoc->length < 12)
-		return -EINVAL;
-
-	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
-
-	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
-
-	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = 8;
-	areq_ctx->trailen = 4;
-	areq_ctx->sg = dst;
-
-	areq_ctx->complete = authenc_esn_geniv_ahash_done;
-	areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
-	areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
-
-	hash = crypto_authenc_esn_ahash(req, flags);
-	if (IS_ERR(hash))
-		return PTR_ERR(hash);
+	struct scatterlist *src, *dst;
+	int err;
 
 
-	scatterwalk_map_and_copy(hash, dst, cryptlen,
-				 crypto_aead_authsize(authenc_esn), 1);
-	return 0;
-}
+	sg_init_table(areq_ctx->src, 2);
+	src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
+	dst = src;
 
 
+	if (req->src != req->dst) {
+		err = crypto_authenc_esn_copy(req, assoclen);
+		if (err)
+			return err;
 
 
-static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
-					    int err)
-{
-	struct aead_request *areq = req->data;
-
-	if (!err) {
-		struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
-		struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-		struct ablkcipher_request *abreq = aead_request_ctx(areq);
-		u8 *iv = (u8 *)(abreq + 1) +
-			 crypto_ablkcipher_reqsize(ctx->enc);
-
-		err = crypto_authenc_esn_genicv(areq, iv, 0);
+		sg_init_table(areq_ctx->dst, 2);
+		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
 	}
 	}
 
 
-	authenc_esn_request_complete(areq, err);
-}
-
-static int crypto_authenc_esn_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_ablkcipher *enc = ctx->enc;
-	struct scatterlist *dst = req->dst;
-	unsigned int cryptlen = req->cryptlen;
-	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-						    + ctx->reqoff);
-	u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
-	int err;
-
 	ablkcipher_request_set_tfm(abreq, enc);
 	ablkcipher_request_set_tfm(abreq, enc);
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 					crypto_authenc_esn_encrypt_done, req);
 					crypto_authenc_esn_encrypt_done, req);
-	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
-
-	memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
+	ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
 
 
 	err = crypto_ablkcipher_encrypt(abreq);
 	err = crypto_ablkcipher_encrypt(abreq);
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
-	return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+	return crypto_authenc_esn_genicv(req, aead_request_flags(req));
 }
 }
 
 
-static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
-					       int err)
+static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
+					   unsigned int flags)
 {
 {
-	struct aead_request *areq = req->data;
-
-	if (!err) {
-		struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-
-		err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
-	}
+	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(authenc_esn);
+	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+						    + ctx->reqoff);
+	struct crypto_ahash *auth = ctx->auth;
+	u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
+			      crypto_ahash_alignmask(auth) + 1);
+	unsigned int cryptlen = req->cryptlen - authsize;
+	unsigned int assoclen = req->assoclen;
+	struct scatterlist *dst = req->dst;
+	u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+	u32 tmp[2];
 
 
-	authenc_esn_request_complete(areq, err);
-}
+	/* Move high-order bits of sequence number back. */
+	scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+	scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
 
 
-static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct aead_request *areq = &req->areq;
-	struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-	u8 *iv = req->giv;
-	int err;
+	if (crypto_memneq(ihash, ohash, authsize))
+		return -EBADMSG;
 
 
-	skcipher_givcrypt_set_tfm(greq, ctx->enc);
-	skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
-				       crypto_authenc_esn_givencrypt_done, areq);
-	skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
-				    areq->iv);
-	skcipher_givcrypt_set_giv(greq, iv, req->seq);
+	sg_init_table(areq_ctx->dst, 2);
+	dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
 
 
-	err = crypto_skcipher_givencrypt(greq);
-	if (err)
-		return err;
+	ablkcipher_request_set_tfm(abreq, ctx->enc);
+	ablkcipher_request_set_callback(abreq, flags,
+					req->base.complete, req->base.data);
+	ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv);
 
 
-	return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+	return crypto_ablkcipher_decrypt(abreq);
 }
 }
 
 
-static int crypto_authenc_esn_verify(struct aead_request *req)
+static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+					  int err)
 {
 {
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	u8 *ohash;
-	u8 *ihash;
-	unsigned int authsize;
-
-	areq_ctx->complete = authenc_esn_verify_ahash_done;
-	areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
-
-	ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (IS_ERR(ohash))
-		return PTR_ERR(ohash);
+	struct aead_request *req = areq->data;
 
 
-	authsize = crypto_aead_authsize(authenc_esn);
-	ihash = ohash + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
+	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
+	aead_request_complete(req, err);
 }
 }
 
 
-static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
-				      unsigned int cryptlen)
+static int crypto_authenc_esn_decrypt(struct aead_request *req)
 {
 {
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct scatterlist *src = req->src;
-	struct scatterlist *assoc = req->assoc;
-	struct scatterlist *cipher = areq_ctx->cipher;
-	struct scatterlist *hsg = areq_ctx->hsg;
-	struct scatterlist *tsg = areq_ctx->tsg;
-	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
-	struct page *srcp;
-	u8 *vsrc;
-
-	srcp = sg_page(src);
-	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
-
-	if (ivsize) {
-		sg_init_table(cipher, 2);
-		sg_set_buf(cipher, iv, ivsize);
-		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
-		src = cipher;
-		cryptlen += ivsize;
-	}
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	unsigned int authsize = crypto_aead_authsize(authenc_esn);
+	struct crypto_ahash *auth = ctx->auth;
+	u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
+			      crypto_ahash_alignmask(auth) + 1);
+	unsigned int assoclen = req->assoclen;
+	unsigned int cryptlen = req->cryptlen;
+	u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+	struct scatterlist *dst = req->dst;
+	u32 tmp[2];
+	int err;
 
 
-	if (assoc->length < 12)
-		return -EINVAL;
+	cryptlen -= authsize;
 
 
-	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
+	if (req->src != dst) {
+		err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
+		if (err)
+			return err;
+	}
 
 
-	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
+	scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
+				 authsize, 0);
 
 
-	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = 8;
-	areq_ctx->trailen = 4;
-	areq_ctx->sg = src;
+	if (!authsize)
+		goto tail;
 
 
-	areq_ctx->complete = authenc_esn_verify_ahash_done;
-	areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
-	areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
+	/* Move high-order bits of sequence number to the end. */
+	scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
+	scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
 
 
-	return crypto_authenc_esn_verify(req);
-}
+	sg_init_table(areq_ctx->dst, 2);
+	dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
 
 
-static int crypto_authenc_esn_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct ablkcipher_request *abreq = aead_request_ctx(req);
-	unsigned int cryptlen = req->cryptlen;
-	unsigned int authsize = crypto_aead_authsize(authenc_esn);
-	u8 *iv = req->iv;
-	int err;
-
-	if (cryptlen < authsize)
-		return -EINVAL;
-	cryptlen -= authsize;
+	ahash_request_set_tfm(ahreq, auth);
+	ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
+	ahash_request_set_callback(ahreq, aead_request_flags(req),
+				   authenc_esn_verify_ahash_done, req);
 
 
-	err = crypto_authenc_esn_iverify(req, iv, cryptlen);
+	err = crypto_ahash_digest(ahreq);
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
-
-	return crypto_ablkcipher_decrypt(abreq);
+tail:
+	return crypto_authenc_esn_decrypt_tail(req, aead_request_flags(req));
 }
 }
 
 
-static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
+static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
 {
 {
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ahash *auth;
 	struct crypto_ahash *auth;
 	struct crypto_ablkcipher *enc;
 	struct crypto_ablkcipher *enc;
+	struct crypto_blkcipher *null;
 	int err;
 	int err;
 
 
 	auth = crypto_spawn_ahash(&ictx->auth);
 	auth = crypto_spawn_ahash(&ictx->auth);
@@ -634,15 +344,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
 	if (IS_ERR(enc))
 	if (IS_ERR(enc))
 		goto err_free_ahash;
 		goto err_free_ahash;
 
 
+	null = crypto_get_default_null_skcipher();
+	err = PTR_ERR(null);
+	if (IS_ERR(null))
+		goto err_free_skcipher;
+
 	ctx->auth = auth;
 	ctx->auth = auth;
 	ctx->enc = enc;
 	ctx->enc = enc;
+	ctx->null = null;
 
 
-	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
-			    crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1) +
-		      crypto_ablkcipher_ivsize(enc);
+	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
+			    crypto_ahash_alignmask(auth) + 1);
 
 
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+	crypto_aead_set_reqsize(
+		tfm,
 		sizeof(struct authenc_esn_request_ctx) +
 		sizeof(struct authenc_esn_request_ctx) +
 		ctx->reqoff +
 		ctx->reqoff +
 		max_t(unsigned int,
 		max_t(unsigned int,
@@ -653,23 +368,36 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
 
 
 	return 0;
 	return 0;
 
 
+err_free_skcipher:
+	crypto_free_ablkcipher(enc);
 err_free_ahash:
 err_free_ahash:
 	crypto_free_ahash(auth);
 	crypto_free_ahash(auth);
 	return err;
 	return err;
 }
 }
 
 
-static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
 {
 {
-	struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
 
 
 	crypto_free_ahash(ctx->auth);
 	crypto_free_ahash(ctx->auth);
 	crypto_free_ablkcipher(ctx->enc);
 	crypto_free_ablkcipher(ctx->enc);
+	crypto_put_default_null_skcipher();
+}
+
+static void crypto_authenc_esn_free(struct aead_instance *inst)
+{
+	struct authenc_esn_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->enc);
+	crypto_drop_ahash(&ctx->auth);
+	kfree(inst);
 }
 }
 
 
-static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
+static int crypto_authenc_esn_create(struct crypto_template *tmpl,
+				     struct rtattr **tb)
 {
 {
 	struct crypto_attr_type *algt;
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct hash_alg_common *auth;
 	struct hash_alg_common *auth;
 	struct crypto_alg *auth_base;
 	struct crypto_alg *auth_base;
 	struct crypto_alg *enc;
 	struct crypto_alg *enc;
@@ -679,15 +407,15 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
 
 
 	algt = crypto_get_attr_type(tb);
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 
 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
 			       CRYPTO_ALG_TYPE_AHASH_MASK);
 			       CRYPTO_ALG_TYPE_AHASH_MASK);
 	if (IS_ERR(auth))
 	if (IS_ERR(auth))
-		return ERR_CAST(auth);
+		return PTR_ERR(auth);
 
 
 	auth_base = &auth->base;
 	auth_base = &auth->base;
 
 
@@ -701,13 +429,14 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
 	if (!inst)
 	if (!inst)
 		goto out_put_auth;
 		goto out_put_auth;
 
 
-	ctx = crypto_instance_ctx(inst);
+	ctx = aead_instance_ctx(inst);
 
 
-	err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+	err = crypto_init_ahash_spawn(&ctx->auth, auth,
+				      aead_crypto_instance(inst));
 	if (err)
 	if (err)
 		goto err_free_inst;
 		goto err_free_inst;
 
 
-	crypto_set_skcipher_spawn(&ctx->enc, inst);
+	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
 	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
 				   crypto_requires_sync(algt->type,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
 							algt->mask));
@@ -717,40 +446,44 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
 	enc = crypto_skcipher_spawn_alg(&ctx->enc);
 	enc = crypto_skcipher_spawn_alg(&ctx->enc);
 
 
 	err = -ENAMETOOLONG;
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
-	    CRYPTO_MAX_ALG_NAME)
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "authencesn(%s,%s)", auth_base->cra_name,
+		     enc->cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;
 		goto err_drop_enc;
 
 
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "authencesn(%s,%s)", auth_base->cra_driver_name,
 		     "authencesn(%s,%s)", auth_base->cra_driver_name,
 		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;
 		goto err_drop_enc;
 
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = enc->cra_priority *
-				 10 + auth_base->cra_priority;
-	inst->alg.cra_blocksize = enc->cra_blocksize;
-	inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
-	inst->alg.cra_type = &crypto_aead_type;
+	inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = enc->cra_priority * 10 +
+				      auth_base->cra_priority;
+	inst->alg.base.cra_blocksize = enc->cra_blocksize;
+	inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
+				       enc->cra_alignmask;
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
+
+	inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+	inst->alg.maxauthsize = auth->digestsize;
 
 
-	inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
-	inst->alg.cra_aead.maxauthsize = auth->digestsize;
+	inst->alg.init = crypto_authenc_esn_init_tfm;
+	inst->alg.exit = crypto_authenc_esn_exit_tfm;
 
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
+	inst->alg.setkey = crypto_authenc_esn_setkey;
+	inst->alg.setauthsize = crypto_authenc_esn_setauthsize;
+	inst->alg.encrypt = crypto_authenc_esn_encrypt;
+	inst->alg.decrypt = crypto_authenc_esn_decrypt;
 
 
-	inst->alg.cra_init = crypto_authenc_esn_init_tfm;
-	inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
+	inst->free = crypto_authenc_esn_free,
 
 
-	inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
-	inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
-	inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
-	inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_enc;
 
 
 out:
 out:
 	crypto_mod_put(auth_base);
 	crypto_mod_put(auth_base);
-	return inst;
+	return err;
 
 
 err_drop_enc:
 err_drop_enc:
 	crypto_drop_skcipher(&ctx->enc);
 	crypto_drop_skcipher(&ctx->enc);
@@ -759,23 +492,12 @@ err_drop_auth:
 err_free_inst:
 err_free_inst:
 	kfree(inst);
 	kfree(inst);
 out_put_auth:
 out_put_auth:
-	inst = ERR_PTR(err);
 	goto out;
 	goto out;
 }
 }
 
 
-static void crypto_authenc_esn_free(struct crypto_instance *inst)
-{
-	struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->enc);
-	crypto_drop_ahash(&ctx->auth);
-	kfree(inst);
-}
-
 static struct crypto_template crypto_authenc_esn_tmpl = {
 static struct crypto_template crypto_authenc_esn_tmpl = {
 	.name = "authencesn",
 	.name = "authencesn",
-	.alloc = crypto_authenc_esn_alloc,
-	.free = crypto_authenc_esn_free,
+	.create = crypto_authenc_esn_create,
 	.module = THIS_MODULE,
 	.module = THIS_MODULE,
 };
 };
 
 

+ 218 - 162
crypto/ccm.c

@@ -36,14 +36,20 @@ struct crypto_rfc4309_ctx {
 	u8 nonce[3];
 	u8 nonce[3];
 };
 };
 
 
+struct crypto_rfc4309_req_ctx {
+	struct scatterlist src[3];
+	struct scatterlist dst[3];
+	struct aead_request subreq;
+};
+
 struct crypto_ccm_req_priv_ctx {
 struct crypto_ccm_req_priv_ctx {
 	u8 odata[16];
 	u8 odata[16];
 	u8 idata[16];
 	u8 idata[16];
 	u8 auth_tag[16];
 	u8 auth_tag[16];
 	u32 ilen;
 	u32 ilen;
 	u32 flags;
 	u32 flags;
-	struct scatterlist src[2];
-	struct scatterlist dst[2];
+	struct scatterlist src[3];
+	struct scatterlist dst[3];
 	struct ablkcipher_request abreq;
 	struct ablkcipher_request abreq;
 };
 };
 
 
@@ -265,7 +271,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
 	/* format associated data and compute into mac */
 	/* format associated data and compute into mac */
 	if (assoclen) {
 	if (assoclen) {
 		pctx->ilen = format_adata(idata, assoclen);
 		pctx->ilen = format_adata(idata, assoclen);
-		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
+		get_data_to_compute(cipher, pctx, req->src, req->assoclen);
 	} else {
 	} else {
 		pctx->ilen = 0;
 		pctx->ilen = 0;
 	}
 	}
@@ -286,7 +292,8 @@ static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
 	u8 *odata = pctx->odata;
 	u8 *odata = pctx->odata;
 
 
 	if (!err)
 	if (!err)
-		scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
+		scatterwalk_map_and_copy(odata, req->dst,
+					 req->assoclen + req->cryptlen,
 					 crypto_aead_authsize(aead), 1);
 					 crypto_aead_authsize(aead), 1);
 	aead_request_complete(req, err);
 	aead_request_complete(req, err);
 }
 }
@@ -300,6 +307,41 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
 	return 0;
 	return 0;
 }
 }
 
 
+static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag)
+{
+	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
+	struct scatterlist *sg;
+	u8 *iv = req->iv;
+	int err;
+
+	err = crypto_ccm_check_iv(iv);
+	if (err)
+		return err;
+
+	pctx->flags = aead_request_flags(req);
+
+	 /* Note: rfc 3610 and NIST 800-38C require counter of
+	 * zero to encrypt auth tag.
+	 */
+	memset(iv + 15 - iv[0], 0, iv[0] + 1);
+
+	sg_init_table(pctx->src, 3);
+	sg_set_buf(pctx->src, tag, 16);
+	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
+	if (sg != pctx->src + 1)
+		sg_chain(pctx->src, 2, sg);
+
+	if (req->src != req->dst) {
+		sg_init_table(pctx->dst, 3);
+		sg_set_buf(pctx->dst, tag, 16);
+		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
+		if (sg != pctx->dst + 1)
+			sg_chain(pctx->dst, 2, sg);
+	}
+
+	return 0;
+}
+
 static int crypto_ccm_encrypt(struct aead_request *req)
 static int crypto_ccm_encrypt(struct aead_request *req)
 {
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -312,32 +354,17 @@ static int crypto_ccm_encrypt(struct aead_request *req)
 	u8 *iv = req->iv;
 	u8 *iv = req->iv;
 	int err;
 	int err;
 
 
-	err = crypto_ccm_check_iv(iv);
+	err = crypto_ccm_init_crypt(req, odata);
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
-	pctx->flags = aead_request_flags(req);
-
-	err = crypto_ccm_auth(req, req->src, cryptlen);
+	err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
-	 /* Note: rfc 3610 and NIST 800-38C require counter of
-	 * zero to encrypt auth tag.
-	 */
-	memset(iv + 15 - iv[0], 0, iv[0] + 1);
-
-	sg_init_table(pctx->src, 2);
-	sg_set_buf(pctx->src, odata, 16);
-	scatterwalk_sg_chain(pctx->src, 2, req->src);
-
 	dst = pctx->src;
 	dst = pctx->src;
-	if (req->src != req->dst) {
-		sg_init_table(pctx->dst, 2);
-		sg_set_buf(pctx->dst, odata, 16);
-		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
+	if (req->src != req->dst)
 		dst = pctx->dst;
 		dst = pctx->dst;
-	}
 
 
 	ablkcipher_request_set_tfm(abreq, ctx->ctr);
 	ablkcipher_request_set_tfm(abreq, ctx->ctr);
 	ablkcipher_request_set_callback(abreq, pctx->flags,
 	ablkcipher_request_set_callback(abreq, pctx->flags,
@@ -348,7 +375,7 @@ static int crypto_ccm_encrypt(struct aead_request *req)
 		return err;
 		return err;
 
 
 	/* copy authtag to end of dst */
 	/* copy authtag to end of dst */
-	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
+	scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen,
 				 crypto_aead_authsize(aead), 1);
 				 crypto_aead_authsize(aead), 1);
 	return err;
 	return err;
 }
 }
@@ -361,9 +388,14 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	unsigned int authsize = crypto_aead_authsize(aead);
 	unsigned int authsize = crypto_aead_authsize(aead);
 	unsigned int cryptlen = req->cryptlen - authsize;
 	unsigned int cryptlen = req->cryptlen - authsize;
+	struct scatterlist *dst;
+
+	pctx->flags = 0;
+
+	dst = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
 
 
 	if (!err) {
 	if (!err) {
-		err = crypto_ccm_auth(req, req->dst, cryptlen);
+		err = crypto_ccm_auth(req, dst, cryptlen);
 		if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
 		if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
 			err = -EBADMSG;
 			err = -EBADMSG;
 	}
 	}
@@ -384,31 +416,18 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 	u8 *iv = req->iv;
 	u8 *iv = req->iv;
 	int err;
 	int err;
 
 
-	if (cryptlen < authsize)
-		return -EINVAL;
 	cryptlen -= authsize;
 	cryptlen -= authsize;
 
 
-	err = crypto_ccm_check_iv(iv);
+	err = crypto_ccm_init_crypt(req, authtag);
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
-	pctx->flags = aead_request_flags(req);
-
-	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
-
-	memset(iv + 15 - iv[0], 0, iv[0] + 1);
-
-	sg_init_table(pctx->src, 2);
-	sg_set_buf(pctx->src, authtag, 16);
-	scatterwalk_sg_chain(pctx->src, 2, req->src);
+	scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen,
+				 authsize, 0);
 
 
 	dst = pctx->src;
 	dst = pctx->src;
-	if (req->src != req->dst) {
-		sg_init_table(pctx->dst, 2);
-		sg_set_buf(pctx->dst, authtag, 16);
-		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
+	if (req->src != req->dst)
 		dst = pctx->dst;
 		dst = pctx->dst;
-	}
 
 
 	ablkcipher_request_set_tfm(abreq, ctx->ctr);
 	ablkcipher_request_set_tfm(abreq, ctx->ctr);
 	ablkcipher_request_set_callback(abreq, pctx->flags,
 	ablkcipher_request_set_callback(abreq, pctx->flags,
@@ -418,7 +437,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
-	err = crypto_ccm_auth(req, req->dst, cryptlen);
+	err = crypto_ccm_auth(req, sg_next(dst), cryptlen);
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
@@ -429,11 +448,11 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 	return err;
 	return err;
 }
 }
 
 
-static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
+static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 {
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_cipher *cipher;
 	struct crypto_cipher *cipher;
 	struct crypto_ablkcipher *ctr;
 	struct crypto_ablkcipher *ctr;
 	unsigned long align;
 	unsigned long align;
@@ -451,9 +470,10 @@ static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
 	ctx->cipher = cipher;
 	ctx->cipher = cipher;
 	ctx->ctr = ctr;
 	ctx->ctr = ctr;
 
 
-	align = crypto_tfm_alg_alignmask(tfm);
+	align = crypto_aead_alignmask(tfm);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+	crypto_aead_set_reqsize(
+		tfm,
 		align + sizeof(struct crypto_ccm_req_priv_ctx) +
 		align + sizeof(struct crypto_ccm_req_priv_ctx) +
 		crypto_ablkcipher_reqsize(ctr));
 		crypto_ablkcipher_reqsize(ctr));
 
 
@@ -464,21 +484,31 @@ err_free_cipher:
 	return err;
 	return err;
 }
 }
 
 
-static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
 {
 {
-	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
 
 
 	crypto_free_cipher(ctx->cipher);
 	crypto_free_cipher(ctx->cipher);
 	crypto_free_ablkcipher(ctx->ctr);
 	crypto_free_ablkcipher(ctx->ctr);
 }
 }
 
 
-static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
-						       const char *full_name,
-						       const char *ctr_name,
-						       const char *cipher_name)
+static void crypto_ccm_free(struct aead_instance *inst)
+{
+	struct ccm_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_spawn(&ctx->cipher);
+	crypto_drop_skcipher(&ctx->ctr);
+	kfree(inst);
+}
+
+static int crypto_ccm_create_common(struct crypto_template *tmpl,
+				    struct rtattr **tb,
+				    const char *full_name,
+				    const char *ctr_name,
+				    const char *cipher_name)
 {
 {
 	struct crypto_attr_type *algt;
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct crypto_alg *ctr;
 	struct crypto_alg *ctr;
 	struct crypto_alg *cipher;
 	struct crypto_alg *cipher;
 	struct ccm_instance_ctx *ictx;
 	struct ccm_instance_ctx *ictx;
@@ -486,15 +516,15 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
 
 
 	algt = crypto_get_attr_type(tb);
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 
 	cipher = crypto_alg_mod_lookup(cipher_name,  CRYPTO_ALG_TYPE_CIPHER,
 	cipher = crypto_alg_mod_lookup(cipher_name,  CRYPTO_ALG_TYPE_CIPHER,
 				       CRYPTO_ALG_TYPE_MASK);
 				       CRYPTO_ALG_TYPE_MASK);
 	if (IS_ERR(cipher))
 	if (IS_ERR(cipher))
-		return ERR_CAST(cipher);
+		return PTR_ERR(cipher);
 
 
 	err = -EINVAL;
 	err = -EINVAL;
 	if (cipher->cra_blocksize != 16)
 	if (cipher->cra_blocksize != 16)
@@ -505,14 +535,15 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
 	if (!inst)
 	if (!inst)
 		goto out_put_cipher;
 		goto out_put_cipher;
 
 
-	ictx = crypto_instance_ctx(inst);
+	ictx = aead_instance_ctx(inst);
 
 
-	err = crypto_init_spawn(&ictx->cipher, cipher, inst,
+	err = crypto_init_spawn(&ictx->cipher, cipher,
+				aead_crypto_instance(inst),
 				CRYPTO_ALG_TYPE_MASK);
 				CRYPTO_ALG_TYPE_MASK);
 	if (err)
 	if (err)
 		goto err_free_inst;
 		goto err_free_inst;
 
 
-	crypto_set_skcipher_spawn(&ictx->ctr, inst);
+	crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
 	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
 				   crypto_requires_sync(algt->type,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
 							algt->mask));
@@ -531,33 +562,39 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
 		goto err_drop_ctr;
 		goto err_drop_ctr;
 
 
 	err = -ENAMETOOLONG;
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "ccm_base(%s,%s)", ctr->cra_driver_name,
 		     "ccm_base(%s,%s)", ctr->cra_driver_name,
 		     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_ctr;
 		goto err_drop_ctr;
 
 
-	memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
-	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
-				  (__alignof__(u32) - 1);
-	inst->alg.cra_type = &crypto_aead_type;
-	inst->alg.cra_aead.ivsize = 16;
-	inst->alg.cra_aead.maxauthsize = 16;
-	inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
-	inst->alg.cra_init = crypto_ccm_init_tfm;
-	inst->alg.cra_exit = crypto_ccm_exit_tfm;
-	inst->alg.cra_aead.setkey = crypto_ccm_setkey;
-	inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
-	inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
-	inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;
+	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
+
+	inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = (cipher->cra_priority +
+				       ctr->cra_priority) / 2;
+	inst->alg.base.cra_blocksize = 1;
+	inst->alg.base.cra_alignmask = cipher->cra_alignmask |
+				       ctr->cra_alignmask |
+				       (__alignof__(u32) - 1);
+	inst->alg.ivsize = 16;
+	inst->alg.maxauthsize = 16;
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
+	inst->alg.init = crypto_ccm_init_tfm;
+	inst->alg.exit = crypto_ccm_exit_tfm;
+	inst->alg.setkey = crypto_ccm_setkey;
+	inst->alg.setauthsize = crypto_ccm_setauthsize;
+	inst->alg.encrypt = crypto_ccm_encrypt;
+	inst->alg.decrypt = crypto_ccm_decrypt;
+
+	inst->free = crypto_ccm_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_ctr;
 
 
-out:
+out_put_cipher:
 	crypto_mod_put(cipher);
 	crypto_mod_put(cipher);
-	return inst;
+	return err;
 
 
 err_drop_ctr:
 err_drop_ctr:
 	crypto_drop_skcipher(&ictx->ctr);
 	crypto_drop_skcipher(&ictx->ctr);
@@ -565,12 +602,10 @@ err_drop_cipher:
 	crypto_drop_spawn(&ictx->cipher);
 	crypto_drop_spawn(&ictx->cipher);
 err_free_inst:
 err_free_inst:
 	kfree(inst);
 	kfree(inst);
-out_put_cipher:
-	inst = ERR_PTR(err);
-	goto out;
+	goto out_put_cipher;
 }
 }
 
 
-static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
+static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 {
 	const char *cipher_name;
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
@@ -578,36 +613,28 @@ static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
 
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
 	if (IS_ERR(cipher_name))
-		return ERR_CAST(cipher_name);
+		return PTR_ERR(cipher_name);
 
 
 	if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
 	if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
 		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-ENAMETOOLONG);
+		return -ENAMETOOLONG;
 
 
 	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
 	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 	    CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-ENAMETOOLONG);
+		return -ENAMETOOLONG;
 
 
-	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
-}
-
-static void crypto_ccm_free(struct crypto_instance *inst)
-{
-	struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_spawn(&ctx->cipher);
-	crypto_drop_skcipher(&ctx->ctr);
-	kfree(inst);
+	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
+					cipher_name);
 }
 }
 
 
 static struct crypto_template crypto_ccm_tmpl = {
 static struct crypto_template crypto_ccm_tmpl = {
 	.name = "ccm",
 	.name = "ccm",
-	.alloc = crypto_ccm_alloc,
-	.free = crypto_ccm_free,
+	.create = crypto_ccm_create,
 	.module = THIS_MODULE,
 	.module = THIS_MODULE,
 };
 };
 
 
-static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
+static int crypto_ccm_base_create(struct crypto_template *tmpl,
+				  struct rtattr **tb)
 {
 {
 	const char *ctr_name;
 	const char *ctr_name;
 	const char *cipher_name;
 	const char *cipher_name;
@@ -615,23 +642,23 @@ static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
 
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
 	if (IS_ERR(ctr_name))
-		return ERR_CAST(ctr_name);
+		return PTR_ERR(ctr_name);
 
 
 	cipher_name = crypto_attr_alg_name(tb[2]);
 	cipher_name = crypto_attr_alg_name(tb[2]);
 	if (IS_ERR(cipher_name))
 	if (IS_ERR(cipher_name))
-		return ERR_CAST(cipher_name);
+		return PTR_ERR(cipher_name);
 
 
 	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
 	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
 		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
 		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-ENAMETOOLONG);
+		return -ENAMETOOLONG;
 
 
-	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
+	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
+					cipher_name);
 }
 }
 
 
 static struct crypto_template crypto_ccm_base_tmpl = {
 static struct crypto_template crypto_ccm_base_tmpl = {
 	.name = "ccm_base",
 	.name = "ccm_base",
-	.alloc = crypto_ccm_base_alloc,
-	.free = crypto_ccm_free,
+	.create = crypto_ccm_base_create,
 	.module = THIS_MODULE,
 	.module = THIS_MODULE,
 };
 };
 
 
@@ -677,10 +704,12 @@ static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
 
 
 static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
 static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
 {
 {
-	struct aead_request *subreq = aead_request_ctx(req);
+	struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
+	struct aead_request *subreq = &rctx->subreq;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
 	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
 	struct crypto_aead *child = ctx->child;
 	struct crypto_aead *child = ctx->child;
+	struct scatterlist *sg;
 	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
 	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
 			   crypto_aead_alignmask(child) + 1);
 			   crypto_aead_alignmask(child) + 1);
 
 
@@ -690,17 +719,38 @@ static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
 	memcpy(iv + 1, ctx->nonce, 3);
 	memcpy(iv + 1, ctx->nonce, 3);
 	memcpy(iv + 4, req->iv, 8);
 	memcpy(iv + 4, req->iv, 8);
 
 
+	scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);
+
+	sg_init_table(rctx->src, 3);
+	sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
+	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
+	if (sg != rctx->src + 1)
+		sg_chain(rctx->src, 2, sg);
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 3);
+		sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
+		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
+		if (sg != rctx->dst + 1)
+			sg_chain(rctx->dst, 2, sg);
+	}
+
 	aead_request_set_tfm(subreq, child);
 	aead_request_set_tfm(subreq, child);
 	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
 	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
 				  req->base.data);
 				  req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
-	aead_request_set_assoc(subreq, req->assoc, req->assoclen);
+	aead_request_set_crypt(subreq, rctx->src,
+			       req->src == req->dst ? rctx->src : rctx->dst,
+			       req->cryptlen, iv);
+	aead_request_set_ad(subreq, req->assoclen - 8);
 
 
 	return subreq;
 	return subreq;
 }
 }
 
 
 static int crypto_rfc4309_encrypt(struct aead_request *req)
 static int crypto_rfc4309_encrypt(struct aead_request *req)
 {
 {
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+
 	req = crypto_rfc4309_crypt(req);
 	req = crypto_rfc4309_crypt(req);
 
 
 	return crypto_aead_encrypt(req);
 	return crypto_aead_encrypt(req);
@@ -708,16 +758,19 @@ static int crypto_rfc4309_encrypt(struct aead_request *req)
 
 
 static int crypto_rfc4309_decrypt(struct aead_request *req)
 static int crypto_rfc4309_decrypt(struct aead_request *req)
 {
 {
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+
 	req = crypto_rfc4309_crypt(req);
 	req = crypto_rfc4309_crypt(req);
 
 
 	return crypto_aead_decrypt(req);
 	return crypto_aead_decrypt(req);
 }
 }
 
 
-static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
+static int crypto_rfc4309_init_tfm(struct crypto_aead *tfm)
 {
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
+	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_aead *aead;
 	struct crypto_aead *aead;
 	unsigned long align;
 	unsigned long align;
 
 
@@ -729,115 +782,118 @@ static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
 
 
 	align = crypto_aead_alignmask(aead);
 	align = crypto_aead_alignmask(aead);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-		sizeof(struct aead_request) +
+	crypto_aead_set_reqsize(
+		tfm,
+		sizeof(struct crypto_rfc4309_req_ctx) +
 		ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
 		ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-		align + 16);
+		align + 32);
 
 
 	return 0;
 	return 0;
 }
 }
 
 
-static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_rfc4309_exit_tfm(struct crypto_aead *tfm)
 {
 {
-	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
 
 
 	crypto_free_aead(ctx->child);
 	crypto_free_aead(ctx->child);
 }
 }
 
 
-static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
+static void crypto_rfc4309_free(struct aead_instance *inst)
+{
+	crypto_drop_aead(aead_instance_ctx(inst));
+	kfree(inst);
+}
+
+static int crypto_rfc4309_create(struct crypto_template *tmpl,
+				 struct rtattr **tb)
 {
 {
 	struct crypto_attr_type *algt;
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct crypto_aead_spawn *spawn;
 	struct crypto_aead_spawn *spawn;
-	struct crypto_alg *alg;
+	struct aead_alg *alg;
 	const char *ccm_name;
 	const char *ccm_name;
 	int err;
 	int err;
 
 
 	algt = crypto_get_attr_type(tb);
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 
 	ccm_name = crypto_attr_alg_name(tb[1]);
 	ccm_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ccm_name))
 	if (IS_ERR(ccm_name))
-		return ERR_CAST(ccm_name);
+		return PTR_ERR(ccm_name);
 
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
 	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
 	if (!inst)
 	if (!inst)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 
-	spawn = crypto_instance_ctx(inst);
-	crypto_set_aead_spawn(spawn, inst);
+	spawn = aead_instance_ctx(inst);
+	crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
 	err = crypto_grab_aead(spawn, ccm_name, 0,
 	err = crypto_grab_aead(spawn, ccm_name, 0,
 			       crypto_requires_sync(algt->type, algt->mask));
 			       crypto_requires_sync(algt->type, algt->mask));
 	if (err)
 	if (err)
 		goto out_free_inst;
 		goto out_free_inst;
 
 
-	alg = crypto_aead_spawn_alg(spawn);
+	alg = crypto_spawn_aead_alg(spawn);
 
 
 	err = -EINVAL;
 	err = -EINVAL;
 
 
 	/* We only support 16-byte blocks. */
 	/* We only support 16-byte blocks. */
-	if (alg->cra_aead.ivsize != 16)
+	if (crypto_aead_alg_ivsize(alg) != 16)
 		goto out_drop_alg;
 		goto out_drop_alg;
 
 
 	/* Not a stream cipher? */
 	/* Not a stream cipher? */
-	if (alg->cra_blocksize != 1)
+	if (alg->base.cra_blocksize != 1)
 		goto out_drop_alg;
 		goto out_drop_alg;
 
 
 	err = -ENAMETOOLONG;
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
-	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-		     "rfc4309(%s)", alg->cra_driver_name) >=
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc4309(%s)", alg->base.cra_name) >=
+	    CRYPTO_MAX_ALG_NAME ||
+	    snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc4309(%s)", alg->base.cra_driver_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_drop_alg;
 		goto out_drop_alg;
 
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_nivaead_type;
+	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = alg->base.cra_priority;
+	inst->alg.base.cra_blocksize = 1;
+	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
 
 
-	inst->alg.cra_aead.ivsize = 8;
-	inst->alg.cra_aead.maxauthsize = 16;
+	inst->alg.ivsize = 8;
+	inst->alg.maxauthsize = 16;
 
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
 
 
-	inst->alg.cra_init = crypto_rfc4309_init_tfm;
-	inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
+	inst->alg.init = crypto_rfc4309_init_tfm;
+	inst->alg.exit = crypto_rfc4309_exit_tfm;
 
 
-	inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
-	inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
-	inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
-	inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
+	inst->alg.setkey = crypto_rfc4309_setkey;
+	inst->alg.setauthsize = crypto_rfc4309_setauthsize;
+	inst->alg.encrypt = crypto_rfc4309_encrypt;
+	inst->alg.decrypt = crypto_rfc4309_decrypt;
 
 
-	inst->alg.cra_aead.geniv = "seqiv";
+	inst->free = crypto_rfc4309_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto out_drop_alg;
 
 
 out:
 out:
-	return inst;
+	return err;
 
 
 out_drop_alg:
 out_drop_alg:
 	crypto_drop_aead(spawn);
 	crypto_drop_aead(spawn);
 out_free_inst:
 out_free_inst:
 	kfree(inst);
 	kfree(inst);
-	inst = ERR_PTR(err);
 	goto out;
 	goto out;
 }
 }
 
 
-static void crypto_rfc4309_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
-	kfree(inst);
-}
-
 static struct crypto_template crypto_rfc4309_tmpl = {
 static struct crypto_template crypto_rfc4309_tmpl = {
 	.name = "rfc4309",
 	.name = "rfc4309",
-	.alloc = crypto_rfc4309_alloc,
-	.free = crypto_rfc4309_free,
+	.create = crypto_rfc4309_create,
 	.module = THIS_MODULE,
 	.module = THIS_MODULE,
 };
 };
 
 

+ 12 - 16
crypto/chacha20_generic.c

@@ -13,14 +13,7 @@
 #include <linux/crypto.h>
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/module.h>
-
-#define CHACHA20_NONCE_SIZE 16
-#define CHACHA20_KEY_SIZE   32
-#define CHACHA20_BLOCK_SIZE 64
-
-struct chacha20_ctx {
-	u32 key[8];
-};
+#include <crypto/chacha20.h>
 
 
 static inline u32 rotl32(u32 v, u8 n)
 static inline u32 rotl32(u32 v, u8 n)
 {
 {
@@ -108,7 +101,7 @@ static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
 	}
 	}
 }
 }
 
 
-static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
+void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
 {
 {
 	static const char constant[16] = "expand 32-byte k";
 	static const char constant[16] = "expand 32-byte k";
 
 
@@ -129,8 +122,9 @@ static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
 	state[14] = le32_to_cpuvp(iv +  8);
 	state[14] = le32_to_cpuvp(iv +  8);
 	state[15] = le32_to_cpuvp(iv + 12);
 	state[15] = le32_to_cpuvp(iv + 12);
 }
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_init);
 
 
-static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
 			   unsigned int keysize)
 			   unsigned int keysize)
 {
 {
 	struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -144,8 +138,9 @@ static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
 
 
 	return 0;
 	return 0;
 }
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
 
 
-static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 			  struct scatterlist *src, unsigned int nbytes)
 			  struct scatterlist *src, unsigned int nbytes)
 {
 {
 	struct blkcipher_walk walk;
 	struct blkcipher_walk walk;
@@ -155,7 +150,7 @@ static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
 	err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
 
 
-	chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
+	crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
 
 
 	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
 	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
 		chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
 		chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
@@ -172,6 +167,7 @@ static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 
 
 	return err;
 	return err;
 }
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_crypt);
 
 
 static struct crypto_alg alg = {
 static struct crypto_alg alg = {
 	.cra_name		= "chacha20",
 	.cra_name		= "chacha20",
@@ -187,11 +183,11 @@ static struct crypto_alg alg = {
 		.blkcipher = {
 		.blkcipher = {
 			.min_keysize	= CHACHA20_KEY_SIZE,
 			.min_keysize	= CHACHA20_KEY_SIZE,
 			.max_keysize	= CHACHA20_KEY_SIZE,
 			.max_keysize	= CHACHA20_KEY_SIZE,
-			.ivsize		= CHACHA20_NONCE_SIZE,
+			.ivsize		= CHACHA20_IV_SIZE,
 			.geniv		= "seqiv",
 			.geniv		= "seqiv",
-			.setkey		= chacha20_setkey,
-			.encrypt	= chacha20_crypt,
-			.decrypt	= chacha20_crypt,
+			.setkey		= crypto_chacha20_setkey,
+			.encrypt	= crypto_chacha20_crypt,
+			.decrypt	= crypto_chacha20_crypt,
 		},
 		},
 	},
 	},
 };
 };

+ 126 - 90
crypto/chacha20poly1305.c

@@ -13,6 +13,8 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/chacha20.h>
+#include <crypto/poly1305.h>
 #include <linux/err.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
@@ -20,11 +22,6 @@
 
 
 #include "internal.h"
 
-#define POLY1305_BLOCK_SIZE	16
-#define POLY1305_DIGEST_SIZE	16
-#define POLY1305_KEY_SIZE	32
-#define CHACHA20_KEY_SIZE	32
-#define CHACHA20_IV_SIZE	16
 #define CHACHAPOLY_IV_SIZE	12
 
 struct chachapoly_instance_ctx {
@@ -60,12 +57,16 @@ struct chacha_req {
 };
 
 struct chachapoly_req_ctx {
+	struct scatterlist src[2];
+	struct scatterlist dst[2];
 	/* the key we generate for Poly1305 using Chacha20 */
 	u8 key[POLY1305_KEY_SIZE];
 	/* calculated Poly1305 tag */
 	u8 tag[POLY1305_DIGEST_SIZE];
 	/* length of data to en/decrypt, without ICV */
 	unsigned int cryptlen;
+	/* Actual AD, excluding IV */
+	unsigned int assoclen;
 	union {
 		struct poly_req poly;
 		struct chacha_req chacha;
@@ -98,7 +99,9 @@ static int poly_verify_tag(struct aead_request *req)
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	u8 tag[sizeof(rctx->tag)];
 
-	scatterwalk_map_and_copy(tag, req->src, rctx->cryptlen, sizeof(tag), 0);
+	scatterwalk_map_and_copy(tag, req->src,
+				 req->assoclen + rctx->cryptlen,
+				 sizeof(tag), 0);
 	if (crypto_memneq(tag, rctx->tag, sizeof(tag)))
 		return -EBADMSG;
 	return 0;
@@ -108,7 +111,8 @@ static int poly_copy_tag(struct aead_request *req)
 {
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 
-	scatterwalk_map_and_copy(rctx->tag, req->dst, rctx->cryptlen,
+	scatterwalk_map_and_copy(rctx->tag, req->dst,
+				 req->assoclen + rctx->cryptlen,
 				 sizeof(rctx->tag), 1);
 	return 0;
 }
@@ -123,14 +127,24 @@ static int chacha_decrypt(struct aead_request *req)
 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	struct chacha_req *creq = &rctx->u.chacha;
+	struct scatterlist *src, *dst;
 	int err;
 
 	chacha_iv(creq->iv, req, 1);
 
+	sg_init_table(rctx->src, 2);
+	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
+	dst = src;
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 2);
+		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+	}
+
 	ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
					chacha_decrypt_done, req);
 	ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-	ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
+	ablkcipher_request_set_crypt(&creq->req, src, dst,
 				     rctx->cryptlen, creq->iv);
 	err = crypto_ablkcipher_decrypt(&creq->req);
 	if (err)
@@ -156,14 +170,15 @@ static void poly_tail_done(struct crypto_async_request *areq, int err)
 
 static int poly_tail(struct aead_request *req)
 {
-	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	struct poly_req *preq = &rctx->u.poly;
 	__le64 len;
 	int err;
 
 	sg_init_table(preq->src, 1);
-	len = cpu_to_le64(req->assoclen);
+	len = cpu_to_le64(rctx->assoclen);
 	memcpy(&preq->tail.assoclen, &len, sizeof(len));
 	len = cpu_to_le64(rctx->cryptlen);
 	memcpy(&preq->tail.cryptlen, &len, sizeof(len));
@@ -228,6 +243,9 @@ static int poly_cipher(struct aead_request *req)
 	if (rctx->cryptlen == req->cryptlen) /* encrypting */
 		crypt = req->dst;
 
+	sg_init_table(rctx->src, 2);
+	crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
+
 	ahash_request_set_callback(&preq->req, aead_request_flags(req),
 				   poly_cipher_done, req);
 	ahash_request_set_tfm(&preq->req, ctx->poly);
@@ -253,7 +271,7 @@ static int poly_adpad(struct aead_request *req)
 	unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
 	int err;
 
-	padlen = (bs - (req->assoclen % bs)) % bs;
+	padlen = (bs - (rctx->assoclen % bs)) % bs;
 	memset(preq->pad, 0, sizeof(preq->pad));
 	sg_init_table(preq->src, 1);
 	sg_set_buf(preq->src, preq->pad, padlen);
@@ -285,7 +303,7 @@ static int poly_ad(struct aead_request *req)
 	ahash_request_set_callback(&preq->req, aead_request_flags(req),
 				   poly_ad_done, req);
 	ahash_request_set_tfm(&preq->req, ctx->poly);
-	ahash_request_set_crypt(&preq->req, req->assoc, NULL, req->assoclen);
+	ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
 
 	err = crypto_ahash_update(&preq->req);
 	if (err)
@@ -351,11 +369,20 @@ static void poly_genkey_done(struct crypto_async_request *areq, int err)
 
 static int poly_genkey(struct aead_request *req)
 {
-	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	struct chacha_req *creq = &rctx->u.chacha;
 	int err;
 
+	rctx->assoclen = req->assoclen;
+
+	if (crypto_aead_ivsize(tfm) == 8) {
+		if (rctx->assoclen < 8)
+			return -EINVAL;
+		rctx->assoclen -= 8;
+	}
+
 	sg_init_table(creq->src, 1);
 	memset(rctx->key, 0, sizeof(rctx->key));
 	sg_set_buf(creq->src, rctx->key, sizeof(rctx->key));
@@ -385,14 +412,24 @@ static int chacha_encrypt(struct aead_request *req)
 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	struct chacha_req *creq = &rctx->u.chacha;
+	struct scatterlist *src, *dst;
 	int err;
 
 	chacha_iv(creq->iv, req, 1);
 
+	sg_init_table(rctx->src, 2);
+	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
+	dst = src;
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 2);
+		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+	}
+
 	ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
					chacha_encrypt_done, req);
 	ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-	ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
+	ablkcipher_request_set_crypt(&creq->req, src, dst,
 				     req->cryptlen, creq->iv);
 	err = crypto_ablkcipher_encrypt(&creq->req);
 	if (err)
@@ -426,8 +463,6 @@ static int chachapoly_decrypt(struct aead_request *req)
 {
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 
-	if (req->cryptlen < POLY1305_DIGEST_SIZE)
-		return -EINVAL;
 	rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
 
 	/* decrypt call chain:
@@ -476,11 +511,11 @@ static int chachapoly_setauthsize(struct crypto_aead *tfm,
 	return 0;
 }
 
-static int chachapoly_init(struct crypto_tfm *tfm)
+static int chachapoly_init(struct crypto_aead *tfm)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct chachapoly_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ablkcipher *chacha;
 	struct crypto_ahash *poly;
 	unsigned long align;
@@ -499,77 +534,87 @@ static int chachapoly_init(struct crypto_tfm *tfm)
 	ctx->poly = poly;
 	ctx->saltlen = ictx->saltlen;
 
-	align = crypto_tfm_alg_alignmask(tfm);
+	align = crypto_aead_alignmask(tfm);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				align + offsetof(struct chachapoly_req_ctx, u) +
-				max(offsetof(struct chacha_req, req) +
-				    sizeof(struct ablkcipher_request) +
-				    crypto_ablkcipher_reqsize(chacha),
-				    offsetof(struct poly_req, req) +
-				    sizeof(struct ahash_request) +
-				    crypto_ahash_reqsize(poly)));
+	crypto_aead_set_reqsize(
+		tfm,
+		align + offsetof(struct chachapoly_req_ctx, u) +
+		max(offsetof(struct chacha_req, req) +
+		    sizeof(struct ablkcipher_request) +
+		    crypto_ablkcipher_reqsize(chacha),
+		    offsetof(struct poly_req, req) +
+		    sizeof(struct ahash_request) +
+		    crypto_ahash_reqsize(poly)));
 
 	return 0;
 }
 
-static void chachapoly_exit(struct crypto_tfm *tfm)
+static void chachapoly_exit(struct crypto_aead *tfm)
 {
-	struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 
 	crypto_free_ahash(ctx->poly);
 	crypto_free_ablkcipher(ctx->chacha);
 }
 
-static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
-						const char *name,
-						unsigned int ivsize)
+static void chachapoly_free(struct aead_instance *inst)
+{
+	struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->chacha);
+	crypto_drop_ahash(&ctx->poly);
+	kfree(inst);
+}
+
+static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+			     const char *name, unsigned int ivsize)
 {
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct crypto_alg *chacha;
 	struct crypto_alg *poly;
-	struct ahash_alg *poly_ahash;
+	struct hash_alg_common *poly_hash;
 	struct chachapoly_instance_ctx *ctx;
 	const char *chacha_name, *poly_name;
 	int err;
 
 	if (ivsize > CHACHAPOLY_IV_SIZE)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	chacha_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(chacha_name))
-		return ERR_CAST(chacha_name);
+		return PTR_ERR(chacha_name);
 	poly_name = crypto_attr_alg_name(tb[2]);
 	if (IS_ERR(poly_name))
-		return ERR_CAST(poly_name);
+		return PTR_ERR(poly_name);
 
 	poly = crypto_find_alg(poly_name, &crypto_ahash_type,
 			       CRYPTO_ALG_TYPE_HASH,
 			       CRYPTO_ALG_TYPE_AHASH_MASK);
 	if (IS_ERR(poly))
-		return ERR_CAST(poly);
+		return PTR_ERR(poly);
 
 	err = -ENOMEM;
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 	if (!inst)
 		goto out_put_poly;
 
-	ctx = crypto_instance_ctx(inst);
+	ctx = aead_instance_ctx(inst);
 	ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-	poly_ahash = container_of(poly, struct ahash_alg, halg.base);
-	err = crypto_init_ahash_spawn(&ctx->poly, &poly_ahash->halg, inst);
+	poly_hash = __crypto_hash_alg_common(poly);
+	err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
+				      aead_crypto_instance(inst));
 	if (err)
 		goto err_free_inst;
 
-	crypto_set_skcipher_spawn(&ctx->chacha, inst);
+	crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
 				   crypto_requires_sync(algt->type,
							algt->mask));
@@ -587,37 +632,42 @@ static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
 		goto out_drop_chacha;
 
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha_name,
 		     poly_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha->cra_driver_name,
 		     poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= (chacha->cra_flags |
-				poly->cra_flags) & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = (chacha->cra_priority +
-				  poly->cra_priority) / 2;
-	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = chacha->cra_alignmask | poly->cra_alignmask;
-	inst->alg.cra_type = &crypto_nivaead_type;
-	inst->alg.cra_aead.ivsize = ivsize;
-	inst->alg.cra_aead.maxauthsize = POLY1305_DIGEST_SIZE;
-	inst->alg.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen;
-	inst->alg.cra_init = chachapoly_init;
-	inst->alg.cra_exit = chachapoly_exit;
-	inst->alg.cra_aead.encrypt = chachapoly_encrypt;
-	inst->alg.cra_aead.decrypt = chachapoly_decrypt;
-	inst->alg.cra_aead.setkey = chachapoly_setkey;
-	inst->alg.cra_aead.setauthsize = chachapoly_setauthsize;
-	inst->alg.cra_aead.geniv = "seqiv";
-
-out:
+	inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) &
+				   CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = (chacha->cra_priority +
+				       poly->cra_priority) / 2;
+	inst->alg.base.cra_blocksize = 1;
+	inst->alg.base.cra_alignmask = chacha->cra_alignmask |
+				       poly->cra_alignmask;
+	inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
+				     ctx->saltlen;
+	inst->alg.ivsize = ivsize;
+	inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
+	inst->alg.init = chachapoly_init;
+	inst->alg.exit = chachapoly_exit;
+	inst->alg.encrypt = chachapoly_encrypt;
+	inst->alg.decrypt = chachapoly_decrypt;
+	inst->alg.setkey = chachapoly_setkey;
+	inst->alg.setauthsize = chachapoly_setauthsize;
+
+	inst->free = chachapoly_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto out_drop_chacha;
+
+out_put_poly:
 	crypto_mod_put(poly);
-	return inst;
+	return err;
 
 out_drop_chacha:
 	crypto_drop_skcipher(&ctx->chacha);
@@ -625,41 +675,28 @@ err_drop_poly:
 	crypto_drop_ahash(&ctx->poly);
err_free_inst:
 	kfree(inst);
-out_put_poly:
-	inst = ERR_PTR(err);
-	goto out;
-}
-
-static struct crypto_instance *rfc7539_alloc(struct rtattr **tb)
-{
-	return chachapoly_alloc(tb, "rfc7539", 12);
+	goto out_put_poly;
 }
 
-static struct crypto_instance *rfc7539esp_alloc(struct rtattr **tb)
+static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	return chachapoly_alloc(tb, "rfc7539esp", 8);
+	return chachapoly_create(tmpl, tb, "rfc7539", 12);
 }
 
-static void chachapoly_free(struct crypto_instance *inst)
+static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct chachapoly_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->chacha);
-	crypto_drop_ahash(&ctx->poly);
-	kfree(inst);
+	return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
 }
 
 static struct crypto_template rfc7539_tmpl = {
 	.name = "rfc7539",
-	.alloc = rfc7539_alloc,
-	.free = chachapoly_free,
+	.create = rfc7539_create,
 	.module = THIS_MODULE,
 };
 
 static struct crypto_template rfc7539esp_tmpl = {
 	.name = "rfc7539esp",
-	.alloc = rfc7539esp_alloc,
-	.free = chachapoly_free,
+	.create = rfc7539esp_create,
 	.module = THIS_MODULE,
 };
 
@@ -690,6 +727,5 @@ module_exit(chacha20poly1305_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
 MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD");
-MODULE_ALIAS_CRYPTO("chacha20poly1305");
 MODULE_ALIAS_CRYPTO("rfc7539");
 MODULE_ALIAS_CRYPTO("rfc7539esp");

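Two things drive most of the chacha20poly1305.c hunks above: under the reworked AEAD interface the associated data, the plaintext and the ICV travel back to back in req->src, so every tag offset gains req->assoclen and the cipher scatterlists are fast-forwarded past the AD with scatterwalk_ffwd(); and the rfc7539esp variant treats the last 8 AD bytes as the IV, which is why poly_genkey() trims rctx->assoclen. A minimal sketch of that layout arithmetic (plain C; the helper names are illustrative, not kernel API):

/* Sketch: offsets in the single src buffer AD || plaintext || tag. */
unsigned int tag_offset(unsigned int assoclen, unsigned int cryptlen)
{
	return assoclen + cryptlen;	/* matches the scatterwalk offsets above */
}

/* rfc7539esp: the trailing 8 AD bytes are really the IV. */
int esp_adjust_assoclen(unsigned int assoclen, unsigned int *out)
{
	if (assoclen < 8)
		return -1;		/* mirrors the -EINVAL path above */
	*out = assoclen - 8;
	return 0;
}
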
+ 13 - 10
crypto/cryptd.c

@@ -176,10 +176,9 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
 		return;
-	if ((algt->type & CRYPTO_ALG_INTERNAL))
-		*type |= CRYPTO_ALG_INTERNAL;
-	if ((algt->mask & CRYPTO_ALG_INTERNAL))
-		*mask |= CRYPTO_ALG_INTERNAL;
+
+	*type |= algt->type & CRYPTO_ALG_INTERNAL;
+	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -688,16 +687,18 @@ static void cryptd_aead_crypt(struct aead_request *req,
 			int (*crypt)(struct aead_request *req))
 {
 	struct cryptd_aead_request_ctx *rctx;
+	crypto_completion_t compl;
+
 	rctx = aead_request_ctx(req);
+	compl = rctx->complete;
 
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 	aead_request_set_tfm(req, child);
 	err = crypt( req );
-	req->base.complete = rctx->complete;
out:
 	local_bh_disable();
-	rctx->complete(&req->base, err);
+	compl(&req->base, err);
 	local_bh_enable();
 }
 
@@ -708,7 +709,7 @@ static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
 	struct aead_request *req;
 
 	req = container_of(areq, struct aead_request, base);
-	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
 }
 
 static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
@@ -718,7 +719,7 @@ static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
 	struct aead_request *req;
 
 	req = container_of(areq, struct aead_request, base);
-	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
 }
 
 static int cryptd_aead_enqueue(struct aead_request *req,
@@ -756,7 +757,9 @@ static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
 		return PTR_ERR(cipher);
 
 	ctx->child = cipher;
-	crypto_aead_set_reqsize(tfm, sizeof(struct cryptd_aead_request_ctx));
+	crypto_aead_set_reqsize(
+		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
+			 crypto_aead_reqsize(cipher)));
 	return 0;
 }
 
@@ -775,7 +778,7 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
 	struct aead_alg *alg;
 	const char *name;
 	u32 type = 0;
-	u32 mask = 0;
+	u32 mask = CRYPTO_ALG_ASYNC;
 	int err;
 
 	cryptd_check_internal(tb, &type, &mask);

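The cryptd.c hunks guard against the request context being reused once the child cipher runs: the completion pointer is copied out of the context first and the final completion goes through that saved copy, and the request size is now the maximum of cryptd's own context and the child's. The capture pattern in isolation (hypothetical types, a sketch rather than the kernel's code):

typedef void (*completion_fn)(void *data, int err);

struct req_ctx {
	completion_fn complete;		/* may be clobbered once crypt() runs */
};

static void run_and_complete(struct req_ctx *rctx, void *data,
			     int (*crypt)(void *data))
{
	completion_fn compl = rctx->complete;	/* save before the call */
	int err = crypt(data);			/* may overwrite *rctx */

	compl(data, err);			/* complete via the saved copy */
}
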
+ 0 - 32
crypto/crypto_user.c

@@ -25,7 +25,6 @@
 #include <net/netlink.h>
 #include <linux/security.h>
 #include <net/net_namespace.h>
-#include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/rng.h>
 #include <crypto/akcipher.h>
@@ -385,34 +384,6 @@ static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
 	return ERR_PTR(err);
 }
 
-static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
-					       u32 mask)
-{
-	int err;
-	struct crypto_alg *alg;
-
-	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	type |= CRYPTO_ALG_TYPE_AEAD;
-	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	mask |= CRYPTO_ALG_TYPE_MASK;
-
-	for (;;) {
-		alg = crypto_lookup_aead(name,  type, mask);
-		if (!IS_ERR(alg))
-			return alg;
-
-		err = PTR_ERR(alg);
-		if (err != -EAGAIN)
-			break;
-		if (signal_pending(current)) {
-			err = -EINTR;
-			break;
-		}
-	}
-
-	return ERR_PTR(err);
-}
-
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 			  struct nlattr **attrs)
 {
@@ -446,9 +417,6 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 		name = p->cru_name;
 
 	switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_AEAD:
-		alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
-		break;
 	case CRYPTO_ALG_TYPE_GIVCIPHER:
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
 	case CRYPTO_ALG_TYPE_ABLKCIPHER:

+ 12 - 74
crypto/echainiv.c

@@ -19,8 +19,6 @@
  */
 
 #include <crypto/internal/geniv.h>
-#include <crypto/null.h>
-#include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -33,13 +31,6 @@
 
 #define MAX_IV_SIZE 16
 
-struct echainiv_ctx {
-	/* aead_geniv_ctx must be first the element */
-	struct aead_geniv_ctx geniv;
-	struct crypto_blkcipher *null;
-	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
 static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
 
 /* We don't care if we get preempted and read/write IVs from the next CPU. */
@@ -103,7 +94,7 @@ static void echainiv_encrypt_complete(struct crypto_async_request *base,
 static int echainiv_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
 	void *data;
@@ -114,7 +105,7 @@ static int echainiv_encrypt(struct aead_request *req)
 	if (req->cryptlen < ivsize)
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->geniv.child);
+	aead_request_set_tfm(subreq, ctx->child);
 
 	compl = echainiv_encrypt_complete;
 	data = req;
@@ -145,8 +136,8 @@ static int echainiv_encrypt(struct aead_request *req)
 
 	aead_request_set_callback(subreq, req->base.flags, compl, data);
 	aead_request_set_crypt(subreq, req->dst, req->dst,
-			       req->cryptlen - ivsize, info);
-	aead_request_set_ad(subreq, req->assoclen + ivsize);
+			       req->cryptlen, info);
+	aead_request_set_ad(subreq, req->assoclen);
 
 	crypto_xor(info, ctx->salt, ivsize);
 	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
@@ -160,16 +151,16 @@ static int echainiv_encrypt(struct aead_request *req)
 static int echainiv_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
 	void *data;
 	unsigned int ivsize = crypto_aead_ivsize(geniv);
 
-	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
+	if (req->cryptlen < ivsize)
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->geniv.child);
+	aead_request_set_tfm(subreq, ctx->child);
 
 	compl = req->base.complete;
 	data = req->base.data;
@@ -180,61 +171,10 @@ static int echainiv_decrypt(struct aead_request *req)
 	aead_request_set_ad(subreq, req->assoclen + ivsize);
 
 	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-	if (req->src != req->dst)
-		scatterwalk_map_and_copy(req->iv, req->dst,
-					 req->assoclen, ivsize, 1);
 
 	return crypto_aead_decrypt(subreq);
 }
 
-static int echainiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->geniv.lock);
-
-	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
-
-	err = crypto_get_default_rng();
-	if (err)
-		goto out;
-
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_aead_ivsize(geniv));
-	crypto_put_default_rng();
-	if (err)
-		goto out;
-
-	ctx->null = crypto_get_default_null_skcipher();
-	err = PTR_ERR(ctx->null);
-	if (IS_ERR(ctx->null))
-		goto out;
-
-	err = aead_geniv_init(tfm);
-	if (err)
-		goto drop_null;
-
-	ctx->geniv.child = geniv->child;
-	geniv->child = geniv;
-
-out:
-	return err;
-
-drop_null:
-	crypto_put_default_null_skcipher();
-	goto out;
-}
-
-static void echainiv_exit(struct crypto_tfm *tfm)
-{
-	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_aead(ctx->geniv.child);
-	crypto_put_default_null_skcipher();
-}
-
 static int echainiv_aead_create(struct crypto_template *tmpl,
 				struct rtattr **tb)
 {
@@ -251,9 +191,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	spawn = aead_instance_ctx(inst);
 	alg = crypto_spawn_aead_alg(spawn);
 
-	if (alg->base.cra_aead.encrypt)
-		goto done;
-
 	err = -EINVAL;
 	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
 	    inst->alg.ivsize > MAX_IV_SIZE)
@@ -262,14 +199,15 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	inst->alg.encrypt = echainiv_encrypt;
 	inst->alg.decrypt = echainiv_decrypt;
 
-	inst->alg.base.cra_init = echainiv_init;
-	inst->alg.base.cra_exit = echainiv_exit;
+	inst->alg.init = aead_init_geniv;
+	inst->alg.exit = aead_exit_geniv;
 
 	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
-	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
+	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
 	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
-done:
+	inst->free = aead_geniv_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto free_inst;

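echainiv now reuses the shared struct aead_geniv_ctx and the aead_init_geniv()/aead_exit_geniv() helpers, and its encrypt path XORs the chained IV with a per-transform random salt before writing it into the destination at the AD boundary. The salt mix-in on its own (a sketch with hypothetical names):

#include <stddef.h>

/* Sketch: derive the on-the-wire IV by XORing a chained IV with a
 * random per-key salt, the same idea as crypto_xor(info, ctx->salt,
 * ivsize) in echainiv_encrypt() above. */
static void mix_salt(unsigned char *iv, const unsigned char *salt, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		iv[i] ^= salt[i];
}
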
+ 68 - 34
crypto/gcm.c

@@ -38,6 +38,12 @@ struct crypto_rfc4106_ctx {
 	u8 nonce[4];
 };
 
+struct crypto_rfc4106_req_ctx {
+	struct scatterlist src[3];
+	struct scatterlist dst[3];
+	struct aead_request subreq;
+};
+
 struct crypto_rfc4543_instance_ctx {
 	struct crypto_aead_spawn aead;
 };
@@ -601,6 +607,15 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
 	crypto_free_ablkcipher(ctx->ctr);
 }
 
+static void crypto_gcm_free(struct aead_instance *inst)
+{
+	struct gcm_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->ctr);
+	crypto_drop_ahash(&ctx->ghash);
+	kfree(inst);
+}
+
 static int crypto_gcm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
 				    const char *full_name,
@@ -689,6 +704,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 	inst->alg.encrypt = crypto_gcm_encrypt;
 	inst->alg.decrypt = crypto_gcm_decrypt;
 
+	inst->free = crypto_gcm_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_put_ctr;
@@ -728,19 +745,9 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 					ctr_name, "ghash");
 }
 
-static void crypto_gcm_free(struct crypto_instance *inst)
-{
-	struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->ctr);
-	crypto_drop_ahash(&ctx->ghash);
-	kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_gcm_tmpl = {
 	.name = "gcm",
 	.create = crypto_gcm_create,
-	.free = crypto_gcm_free,
 	.module = THIS_MODULE,
 };
 
@@ -770,7 +777,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 static struct crypto_template crypto_gcm_base_tmpl = {
 	.name = "gcm_base",
 	.create = crypto_gcm_base_create,
-	.free = crypto_gcm_free,
 	.module = THIS_MODULE,
 };
 
@@ -816,27 +822,50 @@ static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
 
 static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
 {
-	struct aead_request *subreq = aead_request_ctx(req);
+	struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req);
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
+	struct aead_request *subreq = &rctx->subreq;
 	struct crypto_aead *child = ctx->child;
+	struct scatterlist *sg;
 	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
 			   crypto_aead_alignmask(child) + 1);
 
+	scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+
 	memcpy(iv, ctx->nonce, 4);
 	memcpy(iv + 4, req->iv, 8);
 
+	sg_init_table(rctx->src, 3);
+	sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
+	if (sg != rctx->src + 1)
+		sg_chain(rctx->src, 2, sg);
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 3);
+		sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
+		if (sg != rctx->dst + 1)
+			sg_chain(rctx->dst, 2, sg);
+	}
+
 	aead_request_set_tfm(subreq, child);
 	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
 				  req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
-	aead_request_set_ad(subreq, req->assoclen);
+	aead_request_set_crypt(subreq, rctx->src,
+			       req->src == req->dst ? rctx->src : rctx->dst,
+			       req->cryptlen, iv);
+	aead_request_set_ad(subreq, req->assoclen - 8);
 
 	return subreq;
 }
 
 static int crypto_rfc4106_encrypt(struct aead_request *req)
 {
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+
 	req = crypto_rfc4106_crypt(req);
 
 	return crypto_aead_encrypt(req);
@@ -844,6 +873,9 @@ static int crypto_rfc4106_encrypt(struct aead_request *req)
 
 static int crypto_rfc4106_decrypt(struct aead_request *req)
 {
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+
 	req = crypto_rfc4106_crypt(req);
 
 	return crypto_aead_decrypt(req);
@@ -867,9 +899,9 @@ static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm)
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
 	crypto_aead_set_reqsize(
 		tfm,
-		sizeof(struct aead_request) +
+		sizeof(struct crypto_rfc4106_req_ctx) +
 		ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-		align + 12);
+		align + 24);
 
 	return 0;
 }
@@ -881,6 +913,12 @@ static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm)
 	crypto_free_aead(ctx->child);
 }
 
+static void crypto_rfc4106_free(struct aead_instance *inst)
+{
+	crypto_drop_aead(aead_instance_ctx(inst));
+	kfree(inst);
+}
+
 static int crypto_rfc4106_create(struct crypto_template *tmpl,
 				 struct rtattr **tb)
 {
@@ -934,7 +972,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_drop_alg;
 
-	inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = alg->base.cra_priority;
 	inst->alg.base.cra_blocksize = 1;
 	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
@@ -952,6 +990,8 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
 	inst->alg.encrypt = crypto_rfc4106_encrypt;
 	inst->alg.decrypt = crypto_rfc4106_decrypt;
 
+	inst->free = crypto_rfc4106_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_drop_alg;
@@ -966,16 +1006,9 @@ out_free_inst:
 	goto out;
 }
 
-static void crypto_rfc4106_free(struct crypto_instance *inst)
-{
-	crypto_drop_aead(crypto_instance_ctx(inst));
-	kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_rfc4106_tmpl = {
 	.name = "rfc4106",
 	.create = crypto_rfc4106_create,
-	.free = crypto_rfc4106_free,
 	.module = THIS_MODULE,
 };
 
@@ -1114,6 +1147,15 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
 	crypto_put_default_null_skcipher();
 }
 
+static void crypto_rfc4543_free(struct aead_instance *inst)
+{
+	struct crypto_rfc4543_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_aead(&ctx->aead);
+
+	kfree(inst);
+}
+
 static int crypto_rfc4543_create(struct crypto_template *tmpl,
				struct rtattr **tb)
 {
@@ -1187,6 +1229,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
 	inst->alg.encrypt = crypto_rfc4543_encrypt;
 	inst->alg.decrypt = crypto_rfc4543_decrypt;
 
+	inst->free = crypto_rfc4543_free,
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_drop_alg;
@@ -1201,19 +1245,9 @@ out_free_inst:
 	goto out;
 }
 
-static void crypto_rfc4543_free(struct crypto_instance *inst)
-{
-	struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_aead(&ctx->aead);
-
-	kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_rfc4543_tmpl = {
 	.name = "rfc4543",
 	.create = crypto_rfc4543_create,
-	.free = crypto_rfc4543_free,
 	.module = THIS_MODULE,
 };

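In gcm.c the rfc4106 wrapper now enforces the IPsec AD sizes, 16 bytes (SPI plus 32-bit sequence number plus the 8-byte IV) or 20 bytes (with a 64-bit ESN), copies the AD minus the trailing IV next to the IV buffer, and splices the rest of the request back in with sg_chain(), so the GCM child sees assoclen - 8. The length rule on its own (a sketch, not the kernel helper):

#include <errno.h>

/* Sketch: rfc4106 AD sizes. 8 of the bytes are the IV, so the child
 * GCM transform is handed assoclen - 8, i.e. 8 or 12 bytes of AD. */
static int rfc4106_check_assoclen(unsigned int assoclen)
{
	if (assoclen != 16 && assoclen != 20)
		return -EINVAL;
	return 0;
}
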
+ 1 - 1
crypto/jitterentropy-kcapi.c

@@ -79,7 +79,7 @@ int jent_fips_enabled(void)
 
 void jent_panic(char *s)
 {
-	panic(s);
+	panic("%s", s);
 }
 
 void jent_memcpy(void *dest, const void *src, unsigned int n)

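The one-line jitterentropy fix is standard format-string hardening: the message must be passed as data behind a constant "%s" format, otherwise any '%' conversions inside it would be interpreted. The same pattern in userspace C:

#include <stdio.h>

/* Sketch: never pass untrusted text as the format argument. */
static void log_msg(const char *s)
{
	printf("%s\n", s);	/* safe: s is data, not a format */
	/* printf(s); would misbehave if s contained e.g. "%x" */
}
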
+ 7 - 0
crypto/pcrypt.c

@@ -274,11 +274,16 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
 			      u32 type, u32 mask)
 {
 	struct pcrypt_instance_ctx *ctx;
+	struct crypto_attr_type *algt;
 	struct aead_instance *inst;
 	struct aead_alg *alg;
 	const char *name;
 	int err;
 
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
 	name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(name))
 		return PTR_ERR(name);
@@ -299,6 +304,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
 	if (err)
 		goto out_drop_aead;
 
+	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
+
 	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

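pcrypt completes requests asynchronously through padata, so the instance is now flagged CRYPTO_ALG_ASYNC unconditionally and the type attributes are fetched up front; a caller that masks out asynchronous algorithms will then never be handed a pcrypt instance. The (type, mask) matching rule it relies on, roughly (illustrative constant):

#define ALG_ASYNC 0x80U		/* stand-in for CRYPTO_ALG_ASYNC */

/* Sketch: a lookup matches when the algorithm's flags agree with the
 * requested type on every bit the caller set in mask. */
static int lookup_matches(unsigned int alg_flags, unsigned int type,
			  unsigned int mask)
{
	return ((alg_flags ^ type) & mask) == 0;
}
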
+ 35 - 38
crypto/poly1305_generic.c

@@ -13,31 +13,11 @@
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/poly1305.h>
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
-#define POLY1305_BLOCK_SIZE	16
-#define POLY1305_KEY_SIZE	32
-#define POLY1305_DIGEST_SIZE	16
-
-struct poly1305_desc_ctx {
-	/* key */
-	u32 r[5];
-	/* finalize key */
-	u32 s[4];
-	/* accumulator */
-	u32 h[5];
-	/* partial buffer */
-	u8 buf[POLY1305_BLOCK_SIZE];
-	/* bytes used in partial buffer */
-	unsigned int buflen;
-	/* r key has been set */
-	bool rset;
-	/* s key has been set */
-	bool sset;
-};
-
 static inline u64 mlt(u64 a, u64 b)
 {
 	return a * b;
@@ -58,7 +38,7 @@ static inline u32 le32_to_cpuvp(const void *p)
 	return le32_to_cpup(p);
 }
 
-static int poly1305_init(struct shash_desc *desc)
+int crypto_poly1305_init(struct shash_desc *desc)
 {
 	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
 
@@ -69,8 +49,9 @@ static int poly1305_init(struct shash_desc *desc)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_init);
 
-static int poly1305_setkey(struct crypto_shash *tfm,
+int crypto_poly1305_setkey(struct crypto_shash *tfm,
 			   const u8 *key, unsigned int keylen)
 {
 	/* Poly1305 requires a unique key for each tag, which implies that
@@ -79,6 +60,7 @@ static int poly1305_setkey(struct crypto_shash *tfm,
 	 * the update() call. */
 	return -ENOTSUPP;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
 
 static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
 {
@@ -98,16 +80,10 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
 	dctx->s[3] = le32_to_cpuvp(key + 12);
 }
 
-static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
-				    const u8 *src, unsigned int srclen,
-				    u32 hibit)
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+					const u8 *src, unsigned int srclen)
 {
-	u32 r0, r1, r2, r3, r4;
-	u32 s1, s2, s3, s4;
-	u32 h0, h1, h2, h3, h4;
-	u64 d0, d1, d2, d3, d4;
-
-	if (unlikely(!dctx->sset)) {
+	if (!dctx->sset) {
 		if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
 			poly1305_setrkey(dctx, src);
 			src += POLY1305_BLOCK_SIZE;
@@ -121,6 +97,25 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
 			dctx->sset = true;
 		}
 	}
+	return srclen;
+}
+EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey);
+
+static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
+				    const u8 *src, unsigned int srclen,
+				    u32 hibit)
+{
+	u32 r0, r1, r2, r3, r4;
+	u32 s1, s2, s3, s4;
+	u32 h0, h1, h2, h3, h4;
+	u64 d0, d1, d2, d3, d4;
+	unsigned int datalen;
+
+	if (unlikely(!dctx->sset)) {
+		datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
+		src += srclen - datalen;
+		srclen = datalen;
+	}
 
 	r0 = dctx->r[0];
 	r1 = dctx->r[1];
@@ -181,7 +176,7 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
 	return srclen;
 }
 
-static int poly1305_update(struct shash_desc *desc,
+int crypto_poly1305_update(struct shash_desc *desc,
 			   const u8 *src, unsigned int srclen)
 {
 	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
@@ -214,8 +209,9 @@ static int poly1305_update(struct shash_desc *desc,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_update);
 
-static int poly1305_final(struct shash_desc *desc, u8 *dst)
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
 {
 	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
 	__le32 *mac = (__le32 *)dst;
@@ -282,13 +278,14 @@ static int poly1305_final(struct shash_desc *desc, u8 *dst)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_final);
 
 static struct shash_alg poly1305_alg = {
 	.digestsize	= POLY1305_DIGEST_SIZE,
-	.init		= poly1305_init,
-	.update		= poly1305_update,
-	.final		= poly1305_final,
-	.setkey		= poly1305_setkey,
+	.init		= crypto_poly1305_init,
+	.update		= crypto_poly1305_update,
+	.final		= crypto_poly1305_final,
+	.setkey		= crypto_poly1305_setkey,
 	.descsize	= sizeof(struct poly1305_desc_ctx),
 	.base		= {
 		.cra_name		= "poly1305",

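poly1305_generic.c is split so the new x86-64 SIMD driver can reuse the generic key handling: the crypto_poly1305_* entry points are exported, and the first 32 bytes fed through update() are consumed as the one-time key, the clamped r followed by s, before any message block is processed. Condensed, the consumption logic looks like this (field names illustrative):

struct p1305_state {
	int rset, sset;
};

/* Sketch: peel the 16-byte r and s keys off the front of the stream,
 * as crypto_poly1305_setdesckey() does; returns the message bytes left. */
static unsigned int eat_key(struct p1305_state *d, const unsigned char **src,
			    unsigned int len)
{
	if (!d->rset && len >= 16) {
		/* first block: r, stored with the RFC 7539 clamping */
		*src += 16; len -= 16; d->rset = 1;
	}
	if (d->rset && !d->sset && len >= 16) {
		/* second block: s, the finalization key */
		*src += 16; len -= 16; d->sset = 1;
	}
	return len;
}
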
+ 25 - 1
crypto/rsa.c

@@ -267,12 +267,36 @@ err_free_m:
 	return ret;
 }
 
+static int rsa_check_key_length(unsigned int len)
+{
+	switch (len) {
+	case 512:
+	case 1024:
+	case 1536:
+	case 2048:
+	case 3072:
+	case 4096:
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static int rsa_setkey(struct crypto_akcipher *tfm, const void *key,
 		      unsigned int keylen)
 {
 	struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+	int ret;
 
-	return rsa_parse_key(pkey, key, keylen);
+	ret = rsa_parse_key(pkey, key, keylen);
+	if (ret)
+		return ret;
+
+	if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) {
+		rsa_free_key(pkey);
+		ret = -EINVAL;
+	}
+	return ret;
 }
 
 static void rsa_exit_tfm(struct crypto_akcipher *tfm)

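rsa_setkey() now whitelists the modulus length; mpi_get_size() reports bytes, so the left shift by 3 converts to bits before the switch. Spelled out:

/* Sketch: bytes-to-bits conversion behind the key-length check;
 * a 256-byte modulus is a 2048-bit key (256 << 3 == 2048). */
static unsigned int mpi_bytes_to_bits(unsigned int nbytes)
{
	return nbytes << 3;
}
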
+ 2 - 2
crypto/rsa_helper.c

@@ -28,7 +28,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
 		return -ENOMEM;
 
 	/* In FIPS mode only allow key size 2K & 3K */
-	if (fips_enabled && (mpi_get_size(key->n) != 256 ||
+	if (fips_enabled && (mpi_get_size(key->n) != 256 &&
 			     mpi_get_size(key->n) != 384)) {
 		pr_err("RSA: key size not allowed in FIPS mode\n");
 		mpi_free(key->n);
@@ -62,7 +62,7 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
 		return -ENOMEM;
 
 	/* In FIPS mode only allow key size 2K & 3K */
-	if (fips_enabled && (mpi_get_size(key->d) != 256 ||
+	if (fips_enabled && (mpi_get_size(key->d) != 256 &&
 			     mpi_get_size(key->d) != 384)) {
 		pr_err("RSA: key size not allowed in FIPS mode\n");
 		mpi_free(key->d);

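The rsa_helper.c change fixes inverted boolean logic: 'size != 256 || size != 384' holds for every size, because no value equals both, so FIPS mode rejected even the 2K and 3K keys it intended to allow; '&&' expresses 'neither 256 nor 384'. A quick check:

#include <assert.h>

static int rejected_old(unsigned int n) { return n != 256 || n != 384; }
static int rejected_new(unsigned int n) { return n != 256 && n != 384; }

int main(void)
{
	assert(rejected_old(256));	/* the bug: a valid 2K key was rejected */
	assert(!rejected_new(256));	/* fixed: 2K (256-byte) keys pass */
	assert(rejected_new(512));	/* oversize keys are still refused */
	return 0;
}
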
+ 9 - 436
crypto/seqiv.c

@@ -15,7 +15,6 @@
 
 
 #include <crypto/internal/geniv.h>
 #include <crypto/internal/geniv.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/skcipher.h>
-#include <crypto/null.h>
 #include <crypto/rng.h>
 #include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/err.h>
@@ -26,23 +25,11 @@
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/string.h>
 
 
-struct seqniv_request_ctx {
-	struct scatterlist dst[2];
-	struct aead_request subreq;
-};
-
 struct seqiv_ctx {
 struct seqiv_ctx {
 	spinlock_t lock;
 	spinlock_t lock;
 	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
 	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
 };
 };
 
 
-struct seqiv_aead_ctx {
-	/* aead_geniv_ctx must be first the element */
-	struct aead_geniv_ctx geniv;
-	struct crypto_blkcipher *null;
-	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
 static void seqiv_free(struct crypto_instance *inst);
 static void seqiv_free(struct crypto_instance *inst);
 
 
 static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
 static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
@@ -71,32 +58,6 @@ static void seqiv_complete(struct crypto_async_request *base, int err)
 	skcipher_givcrypt_complete(req, err);
 	skcipher_givcrypt_complete(req, err);
 }
 }
 
 
-static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
-{
-	struct aead_request *subreq = aead_givcrypt_reqctx(req);
-	struct crypto_aead *geniv;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = aead_givcrypt_reqtfm(req);
-	memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));
-
-out:
-	kfree(subreq->iv);
-}
-
-static void seqiv_aead_complete(struct crypto_async_request *base, int err)
-{
-	struct aead_givcrypt_request *req = base->data;
-
-	seqiv_aead_complete2(req, err);
-	aead_givcrypt_complete(req, err);
-}
-
 static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
 static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
 {
 {
 	struct aead_request *subreq = aead_request_ctx(req);
 	struct aead_request *subreq = aead_request_ctx(req);
@@ -124,50 +85,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
 	aead_request_complete(req, err);
 	aead_request_complete(req, err);
 }
 }
 
 
-static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err)
-{
-	unsigned int ivsize = 8;
-	u8 data[20];
-
-	if (err == -EINPROGRESS)
-		return;
-
-	/* Swap IV and ESP header back to correct order. */
-	scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0);
-	scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1);
-	scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1);
-}
-
-static void seqniv_aead_encrypt_complete(struct crypto_async_request *base,
-					int err)
-{
-	struct aead_request *req = base->data;
-
-	seqniv_aead_encrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
-static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err)
-{
-	u8 data[4];
-
-	if (err == -EINPROGRESS)
-		return;
-
-	/* Move ESP header back to correct location. */
-	scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0);
-	scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1);
-}
-
-static void seqniv_aead_decrypt_complete(struct crypto_async_request *base,
-					 int err)
-{
-	struct aead_request *req = base->data;
-
-	seqniv_aead_decrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
 static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
 static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
 			unsigned int ivsize)
 			unsigned int ivsize)
 {
 {
@@ -227,112 +144,10 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
 	return err;
 	return err;
 }
 }
 
 
-static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
-	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-	struct aead_request *areq = &req->areq;
-	struct aead_request *subreq = aead_givcrypt_reqctx(req);
-	crypto_completion_t compl;
-	void *data;
-	u8 *info;
-	unsigned int ivsize;
-	int err;
-
-	aead_request_set_tfm(subreq, aead_geniv_base(geniv));
-
-	compl = areq->base.complete;
-	data = areq->base.data;
-	info = areq->iv;
-
-	ivsize = crypto_aead_ivsize(geniv);
-
-	if (unlikely(!IS_ALIGNED((unsigned long)info,
-				 crypto_aead_alignmask(geniv) + 1))) {
-		info = kmalloc(ivsize, areq->base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		compl = seqiv_aead_complete;
-		data = req;
-	}
-
-	aead_request_set_callback(subreq, areq->base.flags, compl, data);
-	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
-			       info);
-	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
-
-	seqiv_geniv(ctx, info, req->seq, ivsize);
-	memcpy(req->giv, info, ivsize);
-
-	err = crypto_aead_encrypt(subreq);
-	if (unlikely(info != areq->iv))
-		seqiv_aead_complete2(req, err);
-	return err;
-}
-
-static int seqniv_aead_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-	struct seqniv_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_request *subreq = &rctx->subreq;
-	struct scatterlist *dst;
-	crypto_completion_t compl;
-	void *data;
-	unsigned int ivsize = 8;
-	u8 buf[20] __attribute__ ((aligned(__alignof__(u32))));
-	int err;
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	/* ESP AD is at most 12 bytes (ESN). */
-	if (req->assoclen > 12)
-		return -EINVAL;
-
-	aead_request_set_tfm(subreq, ctx->geniv.child);
-
-	compl = seqniv_aead_encrypt_complete;
-	data = req;
-
-	if (req->src != req->dst) {
-		struct blkcipher_desc desc = {
-			.tfm = ctx->null,
-		};
-
-		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
-					       req->assoclen + req->cryptlen);
-		if (err)
-			return err;
-	}
-
-	dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
-	aead_request_set_crypt(subreq, dst, dst,
-			       req->cryptlen - ivsize, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
-
-	memcpy(buf, req->iv, ivsize);
-	crypto_xor(buf, ctx->salt, ivsize);
-	memcpy(req->iv, buf, ivsize);
-
-	/* Swap order of IV and ESP AD for ICV generation. */
-	scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0);
-	scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1);
-
-	err = crypto_aead_encrypt(subreq);
-	seqniv_aead_encrypt_complete2(req, err);
-	return err;
-}
-
 static int seqiv_aead_encrypt(struct aead_request *req)
 static int seqiv_aead_encrypt(struct aead_request *req)
 {
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
 	crypto_completion_t compl;
 	void *data;
 	void *data;
@@ -343,7 +158,7 @@ static int seqiv_aead_encrypt(struct aead_request *req)
 	if (req->cryptlen < ivsize)
 	if (req->cryptlen < ivsize)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	aead_request_set_tfm(subreq, ctx->geniv.child);
+	aead_request_set_tfm(subreq, ctx->child);
 
 
 	compl = req->base.complete;
 	compl = req->base.complete;
 	data = req->base.data;
 	data = req->base.data;
@@ -387,67 +202,10 @@ static int seqiv_aead_encrypt(struct aead_request *req)
 	return err;
 	return err;
 }
 }
 
 
-static int seqniv_aead_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-	struct seqniv_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_request *subreq = &rctx->subreq;
-	struct scatterlist *dst;
-	crypto_completion_t compl;
-	void *data;
-	unsigned int ivsize = 8;
-	u8 buf[20];
-	int err;
-
-	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
-		return -EINVAL;
-
-	aead_request_set_tfm(subreq, ctx->geniv.child);
-
-	compl = req->base.complete;
-	data = req->base.data;
-
-	if (req->assoclen > 12)
-		return -EINVAL;
-	else if (req->assoclen > 8) {
-		compl = seqniv_aead_decrypt_complete;
-		data = req;
-	}
-
-	if (req->src != req->dst) {
-		struct blkcipher_desc desc = {
-			.tfm = ctx->null,
-		};
-
-		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
-					       req->assoclen + req->cryptlen);
-		if (err)
-			return err;
-	}
-
-	/* Move ESP AD forward for ICV generation. */
-	scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0);
-	memcpy(req->iv, buf + req->assoclen, ivsize);
-	scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1);
-
-	dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
-	aead_request_set_crypt(subreq, dst, dst,
-			       req->cryptlen - ivsize, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
-
-	err = crypto_aead_decrypt(subreq);
-	if (req->assoclen > 8)
-		seqniv_aead_decrypt_complete2(req, err);
-	return err;
-}
-
 static int seqiv_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
 	void *data;
@@ -456,7 +214,7 @@ static int seqiv_aead_decrypt(struct aead_request *req)
 	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->geniv.child);
+	aead_request_set_tfm(subreq, ctx->child);
 
 	compl = req->base.complete;
 	data = req->base.data;
@@ -467,9 +225,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
 	aead_request_set_ad(subreq, req->assoclen + ivsize);
 
 	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-	if (req->src != req->dst)
-		scatterwalk_map_and_copy(req->iv, req->dst,
-					 req->assoclen, ivsize, 1);
 
 	return crypto_aead_decrypt(subreq);
 }
@@ -495,85 +250,6 @@ static int seqiv_init(struct crypto_tfm *tfm)
 	return err ?: skcipher_geniv_init(tfm);
 }
 
-static int seqiv_old_aead_init(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->lock);
-
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct aead_request));
-	err = 0;
-	if (!crypto_get_default_rng()) {
-		geniv->givencrypt = seqiv_aead_givencrypt;
-		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-					   crypto_aead_ivsize(geniv));
-		crypto_put_default_rng();
-	}
-
-	return err ?: aead_geniv_init(tfm);
-}
-
-static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->geniv.lock);
-
-	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
-
-	err = crypto_get_default_rng();
-	if (err)
-		goto out;
-
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_aead_ivsize(geniv));
-	crypto_put_default_rng();
-	if (err)
-		goto out;
-
-	ctx->null = crypto_get_default_null_skcipher();
-	err = PTR_ERR(ctx->null);
-	if (IS_ERR(ctx->null))
-		goto out;
-
-	err = aead_geniv_init(tfm);
-	if (err)
-		goto drop_null;
-
-	ctx->geniv.child = geniv->child;
-	geniv->child = geniv;
-
-out:
-	return err;
-
-drop_null:
-	crypto_put_default_null_skcipher();
-	goto out;
-}
-
-static int seqiv_aead_init(struct crypto_tfm *tfm)
-{
-	return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
-}
-
-static int seqniv_aead_init(struct crypto_tfm *tfm)
-{
-	return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx));
-}
-
-static void seqiv_aead_exit(struct crypto_tfm *tfm)
-{
-	struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_aead(ctx->geniv.child);
-	crypto_put_default_null_skcipher();
-}
-
 static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
 				   struct rtattr **tb)
 {
@@ -609,33 +285,6 @@ free_inst:
 	goto out;
 }
 
-static int seqiv_old_aead_create(struct crypto_template *tmpl,
-				 struct aead_instance *aead)
-{
-	struct crypto_instance *inst = aead_crypto_instance(aead);
-	int err = -EINVAL;
-
-	if (inst->alg.cra_aead.ivsize < sizeof(u64))
-		goto free_inst;
-
-	inst->alg.cra_init = seqiv_old_aead_init;
-	inst->alg.cra_exit = aead_geniv_exit;
-
-	inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
-	inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err)
-		goto free_inst;
-
-out:
-	return err;
-
-free_inst:
-	aead_geniv_free(aead);
-	goto out;
-}
-
 static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct aead_instance *inst;
@@ -650,15 +299,9 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
 
 	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 
-	if (inst->alg.base.cra_aead.encrypt)
-		return seqiv_old_aead_create(tmpl, inst);
-
 	spawn = aead_instance_ctx(inst);
 	alg = crypto_spawn_aead_alg(spawn);
 
-	if (alg->base.cra_aead.encrypt)
-		goto done;
-
 	err = -EINVAL;
 	if (inst->alg.ivsize != sizeof(u64))
 		goto free_inst;
@@ -666,13 +309,12 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.encrypt = seqiv_aead_encrypt;
 	inst->alg.decrypt = seqiv_aead_decrypt;
 
-	inst->alg.base.cra_init = seqiv_aead_init;
-	inst->alg.base.cra_exit = seqiv_aead_exit;
+	inst->alg.init = aead_init_geniv;
+	inst->alg.exit = aead_exit_geniv;
 
-	inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
-	inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;
+	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
+	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
-done:
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto free_inst;
@@ -702,51 +344,6 @@ static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
 	return err;
 }
 
-static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
-	struct aead_instance *inst;
-	struct crypto_aead_spawn *spawn;
-	struct aead_alg *alg;
-	int err;
-
-	inst = aead_geniv_alloc(tmpl, tb, 0, 0);
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto out;
-
-	spawn = aead_instance_ctx(inst);
-	alg = crypto_spawn_aead_alg(spawn);
-
-	if (alg->base.cra_aead.encrypt)
-		goto done;
-
-	err = -EINVAL;
-	if (inst->alg.ivsize != sizeof(u64))
-		goto free_inst;
-
-	inst->alg.encrypt = seqniv_aead_encrypt;
-	inst->alg.decrypt = seqniv_aead_decrypt;
-
-	inst->alg.base.cra_init = seqniv_aead_init;
-	inst->alg.base.cra_exit = seqiv_aead_exit;
-
-	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
-	inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
-	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
-
-done:
-	err = aead_register_instance(tmpl, inst);
-	if (err)
-		goto free_inst;
-
-out:
-	return err;
-
-free_inst:
-	aead_geniv_free(inst);
-	goto out;
-}
-
 static void seqiv_free(struct crypto_instance *inst)
 {
 	if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
@@ -762,36 +359,13 @@ static struct crypto_template seqiv_tmpl = {
 	.module = THIS_MODULE,
 };
 
-static struct crypto_template seqniv_tmpl = {
-	.name = "seqniv",
-	.create = seqniv_create,
-	.free = seqiv_free,
-	.module = THIS_MODULE,
-};
-
 static int __init seqiv_module_init(void)
 {
-	int err;
-
-	err = crypto_register_template(&seqiv_tmpl);
-	if (err)
-		goto out;
-
-	err = crypto_register_template(&seqniv_tmpl);
-	if (err)
-		goto out_undo_niv;
-
-out:
-	return err;
-
-out_undo_niv:
-	crypto_unregister_template(&seqiv_tmpl);
-	goto out;
+	return crypto_register_template(&seqiv_tmpl);
 }
 
 static void __exit seqiv_module_exit(void)
 {
-	crypto_unregister_template(&seqniv_tmpl);
 	crypto_unregister_template(&seqiv_tmpl);
 }
 
@@ -801,4 +375,3 @@ module_exit(seqiv_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Sequence Number IV Generator");
 MODULE_ALIAS_CRYPTO("seqiv");
-MODULE_ALIAS_CRYPTO("seqniv");

+ 245 - 0
crypto/skcipher.c

@@ -0,0 +1,245 @@
+/*
+ * Symmetric key cipher operations.
+ *
+ * Generic encrypt/decrypt wrapper for ciphers, handles operations across
+ * multiple page boundaries by using temporary blocks.  In user context,
+ * the kernel is given a chance to schedule us once per page.
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/skcipher.h>
+#include <linux/bug.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
+{
+	if (alg->cra_type == &crypto_blkcipher_type)
+		return sizeof(struct crypto_blkcipher *);
+
+	BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
+	       alg->cra_type != &crypto_givcipher_type);
+
+	return sizeof(struct crypto_ablkcipher *);
+}
+
+static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
+				     const u8 *key, unsigned int keylen)
+{
+	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_blkcipher *blkcipher = *ctx;
+	int err;
+
+	crypto_blkcipher_clear_flags(blkcipher, ~0);
+	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
+					      CRYPTO_TFM_REQ_MASK);
+	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
+	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
+				       CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int skcipher_crypt_blkcipher(struct skcipher_request *req,
+				    int (*crypt)(struct blkcipher_desc *,
+						 struct scatterlist *,
+						 struct scatterlist *,
+						 unsigned int))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct blkcipher_desc desc = {
+		.tfm = *ctx,
+		.info = req->iv,
+		.flags = req->base.flags,
+	};
+
+
+	return crypt(&desc, req->dst, req->src, req->cryptlen);
+}
+
+static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+	return skcipher_crypt_blkcipher(req, alg->encrypt);
+}
+
+static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+	return skcipher_crypt_blkcipher(req, alg->decrypt);
+}
+
+static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(*ctx);
+}
+
+int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *calg = tfm->__crt_alg;
+	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
+	struct crypto_blkcipher *blkcipher;
+	struct crypto_tfm *btfm;
+
+	if (!crypto_mod_get(calg))
+		return -EAGAIN;
+
+	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
+					CRYPTO_ALG_TYPE_MASK);
+	if (IS_ERR(btfm)) {
+		crypto_mod_put(calg);
+		return PTR_ERR(btfm);
+	}
+
+	blkcipher = __crypto_blkcipher_cast(btfm);
+	*ctx = blkcipher;
+	tfm->exit = crypto_exit_skcipher_ops_blkcipher;
+
+	skcipher->setkey = skcipher_setkey_blkcipher;
+	skcipher->encrypt = skcipher_encrypt_blkcipher;
+	skcipher->decrypt = skcipher_decrypt_blkcipher;
+
+	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
+
+	return 0;
+}
+
+static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
+				      const u8 *key, unsigned int keylen)
+{
+	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_ablkcipher *ablkcipher = *ctx;
+	int err;
+
+	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
+	crypto_ablkcipher_set_flags(ablkcipher,
+				    crypto_skcipher_get_flags(tfm) &
+				    CRYPTO_TFM_REQ_MASK);
+	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
+	crypto_skcipher_set_flags(tfm,
+				  crypto_ablkcipher_get_flags(ablkcipher) &
+				  CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
+				     int (*crypt)(struct ablkcipher_request *))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct ablkcipher_request *subreq = skcipher_request_ctx(req);
+
+	ablkcipher_request_set_tfm(subreq, *ctx);
+	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
+					req->base.complete, req->base.data);
+	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				     req->iv);
+
+	return crypt(subreq);
+}
+
+static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
+
+	return skcipher_crypt_ablkcipher(req, alg->encrypt);
+}
+
+static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
+
+	return skcipher_crypt_ablkcipher(req, alg->decrypt);
+}
+
+static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_ablkcipher(*ctx);
+}
+
+int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *calg = tfm->__crt_alg;
+	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
+	struct crypto_ablkcipher *ablkcipher;
+	struct crypto_tfm *abtfm;
+
+	if (!crypto_mod_get(calg))
+		return -EAGAIN;
+
+	abtfm = __crypto_alloc_tfm(calg, 0, 0);
+	if (IS_ERR(abtfm)) {
+		crypto_mod_put(calg);
+		return PTR_ERR(abtfm);
+	}
+
+	ablkcipher = __crypto_ablkcipher_cast(abtfm);
+	*ctx = ablkcipher;
+	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
+
+	skcipher->setkey = skcipher_setkey_ablkcipher;
+	skcipher->encrypt = skcipher_encrypt_ablkcipher;
+	skcipher->decrypt = skcipher_decrypt_ablkcipher;
+
+	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
+			    sizeof(struct ablkcipher_request);
+
+	return 0;
+}
+
+static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
+{
+	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
+		return crypto_init_skcipher_ops_blkcipher(tfm);
+
+	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
+	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);
+
+	return crypto_init_skcipher_ops_ablkcipher(tfm);
+}
+
+static const struct crypto_type crypto_skcipher_type2 = {
+	.extsize = crypto_skcipher_extsize,
+	.init_tfm = crypto_skcipher_init_tfm,
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
+	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.tfmsize = offsetof(struct crypto_skcipher, base),
+};
+
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Symmetric key cipher type");

+ 59 - 23
crypto/tcrypt.c

@@ -73,6 +73,22 @@ static char *check[] = {
 	"lzo", "cts", "zlib", NULL
 	"lzo", "cts", "zlib", NULL
 };
 };
 
 
+struct tcrypt_result {
+	struct completion completion;
+	int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+	struct tcrypt_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
 			       struct scatterlist *sg, int blen, int secs)
 {
@@ -143,6 +159,20 @@ out:
 	return ret;
 }
 
+static inline int do_one_aead_op(struct aead_request *req, int ret)
+{
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		struct tcrypt_result *tr = req->base.data;
+
+		ret = wait_for_completion_interruptible(&tr->completion);
+		if (!ret)
+			ret = tr->err;
+		reinit_completion(&tr->completion);
+	}
+
+	return ret;
+}
+
 static int test_aead_jiffies(struct aead_request *req, int enc,
 				int blen, int secs)
 {
@@ -153,9 +183,9 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
 	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		if (enc)
-			ret = crypto_aead_encrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
 		else
-			ret = crypto_aead_decrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
 
 		if (ret)
 			return ret;
@@ -177,9 +207,9 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
 	/* Warm-up run. */
 	for (i = 0; i < 4; i++) {
 		if (enc)
-			ret = crypto_aead_encrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
 		else
-			ret = crypto_aead_decrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
 
 		if (ret)
 			goto out;
@@ -191,9 +221,9 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen)
 
 		start = get_cycles();
 		if (enc)
-			ret = crypto_aead_encrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
 		else
-			ret = crypto_aead_decrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
 		end = get_cycles();
 
 		if (ret)
@@ -286,6 +316,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 	char *axbuf[XBUFSIZE];
 	unsigned int *b_size;
 	unsigned int iv_len;
+	struct tcrypt_result result;
 
 	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
 	if (!iv)
@@ -321,6 +352,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 		goto out_notfm;
 	}
 
+	init_completion(&result.completion);
 	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
 			get_driver_name(crypto_aead, tfm), e);
 
@@ -331,6 +363,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 		goto out_noreq;
 	}
 
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  tcrypt_complete, &result);
+
 	i = 0;
 	do {
 		b_size = aead_sizes;
@@ -749,22 +784,6 @@ out:
 	crypto_free_hash(tfm);
 }
 
-struct tcrypt_result {
-	struct completion completion;
-	int err;
-};
-
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
-	struct tcrypt_result *res = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	res->err = err;
-	complete(&res->completion);
-}
-
 static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 {
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -1759,14 +1778,27 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 
 	case 211:
 		test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
+				NULL, 0, 16, 16, aead_speed_template_20);
+		test_aead_speed("gcm(aes)", ENCRYPT, sec,
 				NULL, 0, 16, 8, aead_speed_template_20);
 		break;
 
 	case 212:
 		test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
-				NULL, 0, 16, 8, aead_speed_template_19);
+				NULL, 0, 16, 16, aead_speed_template_19);
+		break;
+
+	case 213:
+		test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
+				NULL, 0, 16, 8, aead_speed_template_36);
+		break;
+
+	case 214:
+		test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0,
+				  speed_template_32);
 		break;
 
+
 	case 300:
 		if (alg) {
 			test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -1855,6 +1887,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		test_hash_speed("crct10dif", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
 
+	case 321:
+		test_hash_speed("poly1305", sec, poly1305_speed_template);
+		if (mode > 300 && mode < 400) break;
+
 	case 399:
 		break;
 

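Note: the pieces added to crypto/tcrypt.c above (tcrypt_result,
tcrypt_complete, do_one_aead_op and the aead_request_set_callback() call)
exist so the speed tests also work with asynchronous AEAD implementations.
A minimal sketch of how they fit together for one timed operation, using
only names defined in the hunks above:

/* Illustrative only -- the speed loops above do exactly this inline. */
static int example_timed_aead_encrypt(struct aead_request *req,
				      struct tcrypt_result *result)
{
	init_completion(&result->completion);
	/* tcrypt_complete() records the final status and signals us. */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tcrypt_complete, result);

	/* do_one_aead_op() turns -EINPROGRESS/-EBUSY into a blocking wait. */
	return do_one_aead_op(req, crypto_aead_encrypt(req));
}

The new tests are selected through tcrypt's mode parameter (mode 213 now
exercises rfc7539esp(chacha20,poly1305), mode 214 the chacha20 cipher and
mode 321 poly1305), presumably via something like "modprobe tcrypt mode=213
sec=1".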
+ 20 - 0
crypto/tcrypt.h

@@ -61,12 +61,14 @@ static u8 speed_template_32_40_48[] = {32, 40, 48, 0};
 static u8 speed_template_32_48[] = {32, 48, 0};
 static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
 static u8 speed_template_32_64[] = {32, 64, 0};
+static u8 speed_template_32[] = {32, 0};
 
 /*
  * AEAD speed tests
  */
 static u8 aead_speed_template_19[] = {19, 0};
 static u8 aead_speed_template_20[] = {20, 0};
+static u8 aead_speed_template_36[] = {36, 0};
 
 /*
  * Digest speed tests
@@ -127,4 +129,22 @@ static struct hash_speed hash_speed_template_16[] = {
 	{  .blen = 0,	.plen = 0,	.klen = 0, }
 };
 
+static struct hash_speed poly1305_speed_template[] = {
+	{ .blen = 96,	.plen = 16, },
+	{ .blen = 96,	.plen = 32, },
+	{ .blen = 96,	.plen = 96, },
+	{ .blen = 288,	.plen = 16, },
+	{ .blen = 288,	.plen = 32, },
+	{ .blen = 288,	.plen = 288, },
+	{ .blen = 1056,	.plen = 32, },
+	{ .blen = 1056,	.plen = 1056, },
+	{ .blen = 2080,	.plen = 32, },
+	{ .blen = 2080,	.plen = 2080, },
+	{ .blen = 4128,	.plen = 4128, },
+	{ .blen = 8224,	.plen = 8224, },
+
+	/* End marker */
+	{  .blen = 0,	.plen = 0, }
+};
+
 #endif	/* _CRYPTO_TCRYPT_H */

+ 34 - 29
crypto/testmgr.c

@@ -22,6 +22,7 @@
 
 #include <crypto/aead.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/fips.h>
 #include <linux/module.h>
@@ -921,15 +922,15 @@ out_nobuf:
 	return ret;
 }
 
-static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 			   struct cipher_testvec *template, unsigned int tcount,
 			   const bool diff_dst, const int align_offset)
 {
 	const char *algo =
-		crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
+		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
 	unsigned int i, j, k, n, temp;
 	char *q;
-	struct ablkcipher_request *req;
+	struct skcipher_request *req;
 	struct scatterlist sg[8];
 	struct scatterlist sgout[8];
 	const char *e, *d;
@@ -958,15 +959,15 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 
 	init_completion(&result.completion);
 
-	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
 		pr_err("alg: skcipher%s: Failed to allocate request for %s\n",
 		       d, algo);
 		goto out;
 	}
 
-	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					tcrypt_complete, &result);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      tcrypt_complete, &result);
 
 	j = 0;
 	for (i = 0; i < tcount; i++) {
@@ -987,15 +988,16 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 		data += align_offset;
 		memcpy(data, template[i].input, template[i].ilen);
 
-		crypto_ablkcipher_clear_flags(tfm, ~0);
+		crypto_skcipher_clear_flags(tfm, ~0);
 		if (template[i].wk)
-			crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+			crypto_skcipher_set_flags(tfm,
+						  CRYPTO_TFM_REQ_WEAK_KEY);
 
-		ret = crypto_ablkcipher_setkey(tfm, template[i].key,
-					       template[i].klen);
+		ret = crypto_skcipher_setkey(tfm, template[i].key,
+					     template[i].klen);
 		if (!ret == template[i].fail) {
 			pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
-			       d, j, algo, crypto_ablkcipher_get_flags(tfm));
+			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
 		} else if (ret)
 			continue;
@@ -1007,10 +1009,10 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 			sg_init_one(&sgout[0], data, template[i].ilen);
 		}
 
-		ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-					     template[i].ilen, iv);
-		ret = enc ? crypto_ablkcipher_encrypt(req) :
-			    crypto_ablkcipher_decrypt(req);
+		skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+					   template[i].ilen, iv);
+		ret = enc ? crypto_skcipher_encrypt(req) :
+			    crypto_skcipher_decrypt(req);
 
 		switch (ret) {
 		case 0:
@@ -1054,15 +1056,16 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 			memset(iv, 0, MAX_IVLEN);
 
 		j++;
-		crypto_ablkcipher_clear_flags(tfm, ~0);
+		crypto_skcipher_clear_flags(tfm, ~0);
 		if (template[i].wk)
-			crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+			crypto_skcipher_set_flags(tfm,
+						  CRYPTO_TFM_REQ_WEAK_KEY);
 
-		ret = crypto_ablkcipher_setkey(tfm, template[i].key,
-					       template[i].klen);
+		ret = crypto_skcipher_setkey(tfm, template[i].key,
+					     template[i].klen);
 		if (!ret == template[i].fail) {
 			pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
-			       d, j, algo, crypto_ablkcipher_get_flags(tfm));
+			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
 		} else if (ret)
 			continue;
@@ -1100,11 +1103,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 			temp += template[i].tap[k];
 		}
 
-		ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-					     template[i].ilen, iv);
+		skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+					   template[i].ilen, iv);
 
-		ret = enc ? crypto_ablkcipher_encrypt(req) :
-			    crypto_ablkcipher_decrypt(req);
+		ret = enc ? crypto_skcipher_encrypt(req) :
+			    crypto_skcipher_decrypt(req);
 
 		switch (ret) {
 		case 0:
@@ -1157,7 +1160,7 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 	ret = 0;
 
 out:
-	ablkcipher_request_free(req);
+	skcipher_request_free(req);
 	if (diff_dst)
 		testmgr_free_buf(xoutbuf);
 out_nooutbuf:
@@ -1166,7 +1169,7 @@ out_nobuf:
 	return ret;
 }
 
-static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+static int test_skcipher(struct crypto_skcipher *tfm, int enc,
 			 struct cipher_testvec *template, unsigned int tcount)
 {
 	unsigned int alignmask;
@@ -1578,10 +1581,10 @@ out:
 static int alg_test_skcipher(const struct alg_test_desc *desc,
 			     const char *driver, u32 type, u32 mask)
 {
-	struct crypto_ablkcipher *tfm;
+	struct crypto_skcipher *tfm;
 	int err = 0;
 
-	tfm = crypto_alloc_ablkcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
 	if (IS_ERR(tfm)) {
 		printk(KERN_ERR "alg: skcipher: Failed to load transform for "
 		       "%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1600,7 +1603,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
 				    desc->suite.cipher.dec.count);
 
 out:
-	crypto_free_ablkcipher(tfm);
+	crypto_free_skcipher(tfm);
	return err;
 }
 
@@ -2476,6 +2479,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		}
 	}, {
 		.alg = "cmac(aes)",
+		.fips_allowed = 1,
 		.test = alg_test_hash,
 		.suite = {
 			.hash = {
@@ -2485,6 +2489,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		}
 	}, {
 		.alg = "cmac(des3_ede)",
+		.fips_allowed = 1,
 		.test = alg_test_hash,
 		.suite = {
 			.hash = {

+ 2293 - 655
crypto/testmgr.h

@@ -14504,6 +14504,9 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
 			  "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
 			  "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
 		.rlen   = 16,
 		.rlen   = 16,
+		.also_non_np = 1,
+		.np	= 8,
+		.tap	= { 3, 2, 3, 2, 3, 1, 1, 1 },
 	}, {
 	}, {
 		.key    = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
 		.key    = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
 			  "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
 			  "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
@@ -14723,6 +14726,9 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
 		.ilen   = 16,
 		.ilen   = 16,
 		.result = "Single block msg",
 		.result = "Single block msg",
 		.rlen   = 16,
 		.rlen   = 16,
+		.also_non_np = 1,
+		.np	= 8,
+		.tap	= { 3, 2, 3, 2, 3, 1, 1, 1 },
 	}, {
 	}, {
 		.key    = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
 		.key    = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
 			  "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
 			  "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
@@ -15032,6 +15038,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 20 + 16,
 		.iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
 			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.assoc	= "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.alen	= 16,
 		.input  = "Single block msg",
 		.ilen   = 16,
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15057,6 +15066,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 20 + 16,
 		.iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
 			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.assoc	= "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.alen	= 16,
 		.input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
 			  "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15087,6 +15099,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 20 + 16,
 		.iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
 			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.assoc	= "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.alen	= 16,
 		.input  = "This is a 48-byte message (exactly 3 AES blocks)",
 		.ilen   = 48,
 		.result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15116,6 +15131,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 20 + 16,
 		.iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
 			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.assoc	= "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.alen	= 16,
 		.input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
 			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
 			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15154,8 +15172,10 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 20 + 16,
 		.iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
 			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+		.alen   = 24,
 		.input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
 			  "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15199,6 +15219,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 20 + 24,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen	= 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15239,6 +15262,9 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 20 + 32,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen	= 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15374,6 +15400,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 32 + 16,
 		.iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
 			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.assoc	= "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.alen	= 16,
 		.input  = "Single block msg",
 		.ilen   = 16,
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15401,6 +15430,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 32 + 16,
 		.iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
 			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.assoc	= "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.alen	= 16,
 		.input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
 			  "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15433,6 +15465,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 32 + 16,
 		.iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
 			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.assoc	= "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.alen	= 16,
 		.input  = "This is a 48-byte message (exactly 3 AES blocks)",
 		.ilen   = 48,
 		.result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15464,6 +15499,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 32 + 16,
 		.iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
 			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.assoc	= "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.alen	= 16,
 		.input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
 			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
 			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15504,8 +15542,10 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 32 + 16,
 		.iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
 			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+		.alen   = 24,
 		.input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
 			  "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15551,6 +15591,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 32 + 24,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen   = 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15593,6 +15636,9 @@ static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 32 + 32,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen   = 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15641,6 +15687,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 64 + 16,
 		.iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
 			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.assoc	= "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.alen   = 16,
 		.input  = "Single block msg",
 		.ilen   = 16,
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15676,6 +15725,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 64 + 16,
 		.iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
 			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.assoc	= "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.alen   = 16,
 		.input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
 			  "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15716,6 +15768,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 64 + 16,
 		.iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
 			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.assoc	= "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.alen   = 16,
 		.input  = "This is a 48-byte message (exactly 3 AES blocks)",
 		.ilen   = 48,
 		.result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15755,6 +15810,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 64 + 16,
 		.iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
 			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.assoc	= "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.alen   = 16,
 		.input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
 			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
 			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15803,8 +15861,10 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 64 + 16,
 		.iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
 			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+		.alen   = 24,
 		.input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
 			  "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15858,6 +15918,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 64 + 24,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen   = 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15908,6 +15971,9 @@ static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
 		.klen   = 8 + 64 + 32,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen   = 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15955,8 +16021,9 @@ static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 20 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16015,8 +16082,9 @@ static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 24 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16076,8 +16144,9 @@ static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 32 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16140,8 +16209,9 @@ static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 48 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16208,8 +16278,9 @@ static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 64 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16275,8 +16346,9 @@ static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 20 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16337,8 +16409,9 @@ static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 24 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16400,8 +16473,9 @@ static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 32 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16466,8 +16540,9 @@ static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 48 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16536,8 +16611,9 @@ static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 64 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -20129,149 +20205,150 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = {
 };
 };
 
 
 static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
-        { /* Generated using Crypto++ */
+	{ /* Generated using Crypto++ */
 		.key    = zeroed_string,
 		.key    = zeroed_string,
 		.klen	= 20,
 		.klen	= 20,
-                .iv     = zeroed_string,
-                .input  = zeroed_string,
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+		.iv	= zeroed_string,
+		.input  = zeroed_string,
+		.ilen   = 16,
+		.assoc  = zeroed_string,
+		.alen   = 16,
 		.result	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
 		.result	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
-                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
-                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
-                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+			  "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+			  "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+			  "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
 		.rlen	= 32,
 		.rlen	= 32,
-        },{
+	},{
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = zeroed_string,
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input  = zeroed_string,
+		.ilen   = 16,
+		.assoc  = "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
 		.result	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
 		.result	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
-                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
-                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
-                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+			  "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+			  "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+			  "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
 		.rlen	= 32,
 		.rlen	= 32,
 
 
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = zeroed_string,
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+		.iv     = zeroed_string,
+		.input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen   = 16,
+		.assoc  = zeroed_string,
+		.alen   = 16,
 		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
 		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
-                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+			  "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
 		.rlen	= 32,
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = zeroed_string,
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+		.iv     = zeroed_string,
+		.input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen   = 16,
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen   = 16,
 		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
 		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
-                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+			  "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
 		.rlen	= 32,
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen   = 16,
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
 		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
 		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
-                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+			  "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+			  "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+			  "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
 		.rlen	= 32,
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 64,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen   = 64,
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
 		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
 		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
-                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
-                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
-                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
-                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
-                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
-                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
-                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+			  "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+			  "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+			  "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+			  "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+			  "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+			  "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+			  "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+			  "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+			  "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
 		.rlen	= 80,
-        }, {
+	}, {
 		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
 		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
-                          "\x00\x00\x00\x00",
-                .input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff",
-                .ilen   = 192,
-                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
-                          "\xaa\xaa\xaa\xaa",
-                .alen   = 12,
+		.iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef",
+		.input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff",
+		.ilen   = 192,
+		.assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+			  "\x89\xab\xcd\xef",
+		.alen   = 20,
 		.result	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
 		.result	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
 			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
 			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
 			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
 			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
@@ -20316,8 +20393,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x00\x21\x00\x01\x01\x02\x02\x01",
 			  "\x00\x21\x00\x01\x01\x02\x02\x01",
 		.ilen	= 72,
 		.ilen	= 72,
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
-			  "\x00\x00\x00\x00",
-		.alen	= 12,
+			  "\x00\x00\x00\x00\x49\x56\xED\x7E"
+			  "\x3B\x24\x4C\xFE",
+		.alen	= 20,
 		.result	= "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
 		.result	= "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
 			  "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
 			  "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
 			  "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
 			  "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
@@ -20345,8 +20423,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x65\x72\x63\x69\x74\x79\x02\x64"
 			  "\x65\x72\x63\x69\x74\x79\x02\x64"
 			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
 			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
 		.ilen	= 64,
 		.ilen	= 64,
-		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-		.alen	= 8,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
 		.result	= "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
 		.result	= "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
 			  "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
 			  "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
 			  "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
 			  "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
@@ -20374,8 +20453,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
 			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
 			  "\x01\x02\x02\x01",
 			  "\x01\x02\x02\x01",
 		.ilen	= 52,
 		.ilen	= 52,
-		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02",
-		.alen	= 8,
+		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+			  "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.alen	= 16,
 		.result	= "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
 		.result	= "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
 			  "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
 			  "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
 			  "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
 			  "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
@@ -20401,8 +20481,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x75\x76\x77\x61\x62\x63\x64\x65"
 			  "\x75\x76\x77\x61\x62\x63\x64\x65"
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 		.ilen	= 64,
 		.ilen	= 64,
-		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01",
-		.alen	= 8,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
 		.result	= "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
 		.result	= "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
 			  "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
 			  "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
 			  "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
 			  "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
@@ -20430,8 +20511,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 		.ilen	= 64,
 		.ilen	= 64,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.result	= "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
 		.result	= "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
 			  "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
 			  "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
 			  "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
 			  "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
@@ -20455,8 +20537,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x01\x02\x02\x01",
 			  "\x01\x02\x02\x01",
 		.ilen	= 28,
 		.ilen	= 28,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.result	= "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
 		.result	= "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
 			  "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
 			  "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
 			  "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
 			  "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
@@ -20477,8 +20560,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
 			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
 			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
 			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
 		.ilen	= 40,
 		.ilen	= 40,
-		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-		.alen	= 8,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
 		.result	= "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
 		.result	= "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
 			  "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
 			  "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
 			  "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
 			  "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
@@ -20505,8 +20589,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x23\x01\x01\x01",
 			  "\x23\x01\x01\x01",
 		.ilen	= 76,
 		.ilen	= 76,
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
-			  "\x00\x00\x00\x01",
-		.alen	= 12,
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
 		.result	= "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
 		.result	= "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
 			  "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
 			  "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
 			  "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
 			  "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
@@ -20535,8 +20620,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
 			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
 		.ilen	= 40,
 		.ilen	= 40,
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-			  "\xDD\x0D\xB9\x9B",
-		.alen	= 12,
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
 		.result	= "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
 		.result	= "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
 			  "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
 			  "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
 			  "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
 			  "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
@@ -20563,8 +20649,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x15\x01\x01\x01",
 			  "\x15\x01\x01\x01",
 		.ilen	= 76,
 		.ilen	= 76,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.result	= "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
 		.result	= "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
 			  "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
 			  "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
 			  "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
 			  "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
@@ -20597,8 +20684,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
 			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
 		.ilen	= 72,
 		.ilen	= 72,
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-			  "\xDD\x0D\xB9\x9B",
-		.alen	= 12,
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
 		.result	= "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
 		.result	= "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
 			  "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
 			  "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
 			  "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
 			  "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
@@ -20619,8 +20707,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
 		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
 		.input	= "\x01\x02\x02\x01",
 		.input	= "\x01\x02\x02\x01",
 		.ilen	= 4,
 		.ilen	= 4,
-		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF",
-		.alen	= 8,
+		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+			  "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.alen	= 16,
 		.result	= "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
 		.result	= "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
 			  "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
 			  "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
 			  "\x04\xBE\xF2\x70",
 			  "\x04\xBE\xF2\x70",
@@ -20636,8 +20725,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x62\x65\x00\x01",
 			  "\x62\x65\x00\x01",
 		.ilen	= 20,
 		.ilen	= 20,
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
-			  "\x00\x00\x00\x01",
-		.alen	= 12,
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
 		.result	= "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
 		.result	= "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
 			  "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
 			  "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
 			  "\x43\x33\x21\x64\x41\x25\x03\x52"
 			  "\x43\x33\x21\x64\x41\x25\x03\x52"
@@ -20661,8 +20751,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x01\x02\x02\x01",
 			  "\x01\x02\x02\x01",
 		.ilen	= 52,
 		.ilen	= 52,
 		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
 		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
-			  "\xFF\xFF\xFF\xFF",
-		.alen	= 12,
+			  "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+			  "\x67\x65\x74\x6D",
+		.alen	= 20,
 		.result	= "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
 		.result	= "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
 			  "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
 			  "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
 			  "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
 			  "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
@@ -20688,8 +20779,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x01\x02\x02\x01",
 			  "\x01\x02\x02\x01",
 		.ilen	= 52,
 		.ilen	= 52,
 		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
 		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.result	= "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
 		.result	= "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
 			  "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
 			  "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
 			  "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
 			  "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
@@ -20712,8 +20804,9 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 			  "\x71\x72\x73\x74\x01\x02\x02\x01",
 			  "\x71\x72\x73\x74\x01\x02\x02\x01",
 		.ilen	= 32,
 		.ilen	= 32,
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
-			  "\x00\x00\x00\x07",
-		.alen	= 12,
+			  "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+			  "\x3A\x23\x4B\xFD",
+		.alen	= 20,
 		.result	= "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
 		.result	= "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
 			  "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
 			  "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
 			  "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
 			  "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
@@ -20725,122 +20818,122 @@ static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
 };
 
 static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
-        { /* Generated using Crypto++ */
+	{ /* Generated using Crypto++ */
 		.key    = zeroed_string,
 		.klen	= 20,
-                .iv     = zeroed_string,
+		.iv     = zeroed_string,
 		.input	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
 		.input	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
-                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
-                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
-                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+			  "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+			  "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+			  "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
 		.ilen	= 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = zeroed_string,
-                .rlen   = 16,
+		.assoc  = zeroed_string,
+		.alen   = 16,
+		.result = zeroed_string,
+		.rlen   = 16,
-        },{
+	},{
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
 		.input	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
 		.input	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
-                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
-                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
-                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+			  "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+			  "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+			  "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
 		.ilen	= 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = zeroed_string,
-                .rlen   = 16,
-        }, {
+		.assoc  = "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
+		.result = zeroed_string,
+		.rlen   = 16,
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = zeroed_string,
+		.iv     = zeroed_string,
 		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
 		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
-                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+			  "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
 		.ilen	= 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
-        }, {
+		.assoc  = zeroed_string,
+		.alen   = 16,
+		.result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen   = 16,
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = zeroed_string,
+		.iv     = zeroed_string,
 		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
 		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
-                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+			  "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
 		.ilen	= 32,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen   = 16,
+		.result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen   = 16,
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
 		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
 		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
-                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+			  "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+			  "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+			  "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
 		.ilen	= 32,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
-        }, {
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
+		.result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen   = 16,
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
 		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
 		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
-                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
-                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
-                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
-                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
-                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
-                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
-                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+			  "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+			  "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+			  "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+			  "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+			  "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+			  "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+			  "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+			  "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+			  "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
 		.ilen	= 80,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 64,
-        }, {
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
+		.result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen   = 64,
+	}, {
 		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
 		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
-                          "\x00\x00\x00\x00",
+		.iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef",
 		.input	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
 		.input	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
 			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
 			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
 			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
 			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
@@ -20868,34 +20961,35 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
 			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
 			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
 			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
 		.ilen	= 208,
 		.ilen	= 208,
-                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
-                          "\xaa\xaa\xaa\xaa",
-                .alen   = 12,
-                .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff",
-                .rlen   = 192,
+		.assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+			  "\x89\xab\xcd\xef",
+		.alen   = 20,
+		.result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff",
+		.rlen   = 192,
 	}, {
 		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
 			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
@@ -20913,8 +21007,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x00\x21\x00\x01\x01\x02\x02\x01",
 			  "\x00\x21\x00\x01\x01\x02\x02\x01",
 		.rlen	= 72,
 		.rlen	= 72,
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
-			  "\x00\x00\x00\x00",
-		.alen	= 12,
+			  "\x00\x00\x00\x00\x49\x56\xED\x7E"
+			  "\x3B\x24\x4C\xFE",
+		.alen	= 20,
 		.input	= "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
 		.input	= "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
 			  "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
 			  "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
 			  "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
 			  "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
@@ -20942,8 +21037,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x65\x72\x63\x69\x74\x79\x02\x64"
 			  "\x65\x72\x63\x69\x74\x79\x02\x64"
 			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
 			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
 		.rlen	= 64,
 		.rlen	= 64,
-		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-		.alen	= 8,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
 		.input	= "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
 		.input	= "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
 			  "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
 			  "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
 			  "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
 			  "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
@@ -20971,8 +21067,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
 			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
 			  "\x01\x02\x02\x01",
 			  "\x01\x02\x02\x01",
 		.rlen	= 52,
 		.rlen	= 52,
-		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02",
-		.alen	= 8,
+		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+			  "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.alen	= 16,
 		.input	= "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
 		.input	= "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
 			  "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
 			  "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
 			  "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
 			  "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
@@ -20998,8 +21095,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x75\x76\x77\x61\x62\x63\x64\x65"
 			  "\x75\x76\x77\x61\x62\x63\x64\x65"
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 		.rlen	= 64,
 		.rlen	= 64,
-		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01",
-		.alen	= 8,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
 		.input	= "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
 		.input	= "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
 			  "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
 			  "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
 			  "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
 			  "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
@@ -21027,8 +21125,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 		.rlen	= 64,
 		.rlen	= 64,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.input	= "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
 		.input	= "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
 			  "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
 			  "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
 			  "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
 			  "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
@@ -21052,8 +21151,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x01\x02\x02\x01",
 			  "\x01\x02\x02\x01",
 		.rlen	= 28,
 		.rlen	= 28,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.input	= "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
 		.input	= "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
 			  "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
 			  "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
 			  "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
 			  "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
@@ -21074,8 +21174,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
 			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
 			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
 			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
 		.rlen	= 40,
 		.rlen	= 40,
-		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-		.alen	= 8,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
 		.input	= "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
 		.input	= "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
 			  "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
 			  "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
 			  "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
 			  "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
@@ -21102,8 +21203,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x23\x01\x01\x01",
 			  "\x23\x01\x01\x01",
 		.rlen	= 76,
 		.rlen	= 76,
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
-			  "\x00\x00\x00\x01",
-		.alen	= 12,
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
 		.input	= "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
 		.input	= "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
 			  "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
 			  "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
 			  "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
 			  "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
@@ -21132,8 +21234,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
 			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
 		.rlen	= 40,
 		.rlen	= 40,
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-			  "\xDD\x0D\xB9\x9B",
-		.alen	= 12,
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
 		.input	= "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
 		.input	= "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
 			  "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
 			  "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
 			  "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
 			  "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
@@ -21160,8 +21263,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x15\x01\x01\x01",
 			  "\x15\x01\x01\x01",
 		.rlen	= 76,
 		.rlen	= 76,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.input	= "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
 		.input	= "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
 			  "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
 			  "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
 			  "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
 			  "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
@@ -21194,8 +21298,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
 			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
 		.rlen	= 72,
 		.rlen	= 72,
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-			  "\xDD\x0D\xB9\x9B",
-		.alen	= 12,
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
 		.input	= "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
 		.input	= "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
 			  "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
 			  "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
 			  "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
 			  "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
@@ -21216,8 +21321,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
 		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
 		.result	= "\x01\x02\x02\x01",
 		.result	= "\x01\x02\x02\x01",
 		.rlen	= 4,
 		.rlen	= 4,
-		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF",
-		.alen	= 8,
+		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+			  "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.alen	= 16,
 		.input	= "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
 		.input	= "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
 			  "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
 			  "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
 			  "\x04\xBE\xF2\x70",
 			  "\x04\xBE\xF2\x70",
@@ -21233,8 +21339,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x62\x65\x00\x01",
 			  "\x62\x65\x00\x01",
 		.rlen	= 20,
 		.rlen	= 20,
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
-			  "\x00\x00\x00\x01",
-		.alen	= 12,
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
 		.input	= "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
 		.input	= "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
 			  "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
 			  "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
 			  "\x43\x33\x21\x64\x41\x25\x03\x52"
 			  "\x43\x33\x21\x64\x41\x25\x03\x52"
@@ -21258,8 +21365,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x01\x02\x02\x01",
 			  "\x01\x02\x02\x01",
 		.rlen	= 52,
 		.rlen	= 52,
 		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
 		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
-			  "\xFF\xFF\xFF\xFF",
-		.alen	= 12,
+			  "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+			  "\x67\x65\x74\x6D",
+		.alen	= 20,
 		.input	= "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
 		.input	= "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
 			  "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
 			  "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
 			  "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
 			  "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
@@ -21285,8 +21393,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x01\x02\x02\x01",
 			  "\x01\x02\x02\x01",
 		.rlen	= 52,
 		.rlen	= 52,
 		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
 		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.input	= "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
 		.input	= "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
 			  "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
 			  "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
 			  "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
 			  "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
@@ -21309,8 +21418,9 @@ static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
 			  "\x71\x72\x73\x74\x01\x02\x02\x01",
 			  "\x71\x72\x73\x74\x01\x02\x02\x01",
 		.rlen	= 32,
 		.rlen	= 32,
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
-			  "\x00\x00\x00\x07",
-		.alen	= 12,
+			  "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+			  "\x3A\x23\x4B\xFD",
+		.alen	= 20,
 		.input	= "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
 		.input	= "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
 			  "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
 			  "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
 			  "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
 			  "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
@@ -21538,10 +21648,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
 			  "\xba",
 			  "\xba",
 		.rlen	= 33,
 		.rlen	= 33,
 	}, {
 	}, {
-		/*
-		 * This is the same vector as aes_ccm_rfc4309_enc_tv_template[0]
-		 * below but rewritten to use the ccm algorithm directly.
-		 */
+		/* This is taken from FIPS CAVS. */
 		.key	= "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
 		.key	= "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
 			  "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e",
 			  "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e",
 		.klen	= 16,
 		.klen	= 16,
@@ -21559,214 +21666,51 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
 			  "\xda\x24\xea\xd9\xa1\x39\x98\xfd"
 			  "\xda\x24\xea\xd9\xa1\x39\x98\xfd"
 			  "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
 			  "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
 		.rlen	= 48,
 		.rlen	= 48,
-	}
-};
-
-static struct aead_testvec aes_ccm_dec_tv_template[] = {
-	{ /* From RFC 3610 */
-		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
-			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
-		.klen	= 16,
-		.iv	= "\x01\x00\x00\x00\x03\x02\x01\x00"
-			  "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
-		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07",
-		.alen	= 8,
-		.input	= "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
-			  "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
-			  "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
-			  "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
-		.ilen	= 31,
-		.result	= "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-			  "\x10\x11\x12\x13\x14\x15\x16\x17"
-			  "\x18\x19\x1a\x1b\x1c\x1d\x1e",
-		.rlen	= 23,
 	}, {
-		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
-			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+		.key	= "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
+			  "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3",
 		.klen	= 16,
-		.iv	= "\x01\x00\x00\x00\x07\x06\x05\x04"
-			  "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
-		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
-			  "\x08\x09\x0a\x0b",
-		.alen	= 12,
-		.input	= "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
-			  "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
-			  "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
-			  "\x7d\x9c\x2d\x93",
-		.ilen	= 28,
-		.result	= "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
-			  "\x14\x15\x16\x17\x18\x19\x1a\x1b"
-			  "\x1c\x1d\x1e\x1f",
-		.rlen	= 20,
+		.iv	= "\x03\x4f\xa3\x19\xd3\x01\x5a\xd8"
+			  "\x30\x60\x15\x56\x00\x00\x00\x00",
+		.assoc	= "\xda\xe6\x28\x9c\x45\x2d\xfd\x63"
+			  "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7"
+			  "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1"
+			  "\x0a\x63\x09\x78\xbc\x2c\x55\xde",
+		.alen	= 32,
+		.input	= "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
+			  "\xa9\x28\x63\xba\x12\xa3\x14\x85"
+			  "\x57\x1e\x06\xc9\x7b\x21\xef\x76"
+			  "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e",
+		.ilen	= 32,
+		.result	= "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
+			  "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab"
+			  "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff"
+			  "\x3b\xb5\xce\x53\xef\xde\xbb\x02"
+			  "\xa9\x86\x15\x6c\x13\xfe\xda\x0a"
+			  "\x22\xb8\x29\x3d\xd8\x39\x9a\x23",
+		.rlen	= 48,
 	}, {
-		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
-			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
-		.klen	= 16,
-		.iv	= "\x01\x00\x00\x00\x0b\x0a\x09\x08"
-			  "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
-		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07",
-		.alen	= 8,
-		.input	= "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
-			  "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
-			  "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
-			  "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
-			  "\x7e\x5f\x4e",
-		.ilen	= 35,
-		.result	= "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-			  "\x10\x11\x12\x13\x14\x15\x16\x17"
-			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
-			  "\x20",
-		.rlen	= 25,
-	}, {
-		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
-			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
-		.klen	= 16,
-		.iv	= "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
-			  "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
-		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
-			  "\x08\x09\x0a\x0b",
-		.alen	= 12,
-		.input	= "\x07\x34\x25\x94\x15\x77\x85\x15"
-			  "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
-			  "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
-			  "\x4d\x99\x99\x88\xdd",
-		.ilen	= 29,
-		.result	= "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
-			  "\x14\x15\x16\x17\x18\x19\x1a\x1b"
-			  "\x1c\x1d\x1e",
-		.rlen	= 19,
-	}, {
-		.key	= "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
-			  "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
-		.klen	= 16,
-		.iv	= "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
-			  "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
-		.assoc	= "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
-		.alen	= 8,
-		.input	= "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
-			  "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
-			  "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
-			  "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
-		.ilen	= 32,
-		.result	= "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
-			  "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
-			  "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
-		.rlen	= 24,
-	}, {
-		.key	= "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
-			  "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
-		.klen	= 16,
-		.iv	= "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
-			  "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
-		.assoc	= "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
-			  "\x20\xea\x60\xc0",
-		.alen	= 12,
-		.input	= "\x00\x97\x69\xec\xab\xdf\x48\x62"
-			  "\x55\x94\xc5\x92\x51\xe6\x03\x57"
-			  "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
-			  "\x5a\xe0\x70\x45\x51",
-		.ilen	= 29,
-		.result	= "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
-			  "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
-			  "\x3a\x80\x3b\xa8\x7f",
-		.rlen	= 21,
-	}, {
-		.key	= "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
-			  "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
-		.klen	= 16,
-		.iv	= "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
-			  "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
-		.assoc	= "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
-		.alen	= 8,
-		.input	= "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
-			  "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
-			  "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
-			  "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
-			  "\xba",
-		.ilen	= 33,
-		.result	= "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
-			  "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
-			  "\x98\x09\xd6\x7d\xbe\xdd\x18",
-		.rlen	= 23,
-	},
-};
-
-/*
- * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all
- * use a 13-byte nonce, we only support an 11-byte nonce. Similarly, all of
- * Special Publication 800-38C's test vectors also use nonce lengths our
- * implementation doesn't support. The following are taken from fips cavs
- * fax files on hand at Red Hat.
- *
- * nb: actual key lengths are (klen - 3), the last 3 bytes are actually
- * part of the nonce which combine w/the iv, but need to be input this way.
- */
-static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
-	{
-		.key	= "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
-			  "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e"
-			  "\x96\xac\x59",
-		.klen	= 19,
-		.iv	= "\x30\x07\xa1\xe2\xa2\xc7\x55\x24",
-		.alen	= 0,
-		.input	= "\x19\xc8\x81\xf6\xe9\x86\xff\x93"
-			  "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e"
-			  "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c"
-			  "\x35\x2e\xad\xe0\x62\xf9\x91\xa1",
-		.ilen	= 32,
-		.result	= "\xab\x6f\xe1\x69\x1d\x19\x99\xa8"
-			  "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1"
-			  "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a"
-			  "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32"
-			  "\xda\x24\xea\xd9\xa1\x39\x98\xfd"
-			  "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
-		.rlen	= 48,
-	}, {
-		.key	= "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
-			  "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3"
-			  "\x4f\xa3\x19",
-		.klen	= 19,
-		.iv	= "\xd3\x01\x5a\xd8\x30\x60\x15\x56",
-		.assoc	= "\xda\xe6\x28\x9c\x45\x2d\xfd\x63"
-			  "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7"
-			  "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1"
-			  "\x0a\x63\x09\x78\xbc\x2c\x55\xde",
-		.alen	= 32,
-		.input	= "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
-			  "\xa9\x28\x63\xba\x12\xa3\x14\x85"
-			  "\x57\x1e\x06\xc9\x7b\x21\xef\x76"
-			  "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e",
-		.ilen	= 32,
-		.result	= "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
-			  "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab"
-			  "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff"
-			  "\x3b\xb5\xce\x53\xef\xde\xbb\x02"
-			  "\xa9\x86\x15\x6c\x13\xfe\xda\x0a"
-			  "\x22\xb8\x29\x3d\xd8\x39\x9a\x23",
-		.rlen	= 48,
-	}, {
-		.key	= "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
-			  "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75"
-			  "\x53\x14\x73\x66\x8d\x88\xf6\x80"
-			  "\xa0\x20\x35",
-		.klen	= 27,
-		.iv	= "\x26\xf2\x21\x8d\x50\x20\xda\xe2",
-		.assoc	= "\x5b\x9e\x13\x67\x02\x5e\xef\xc1"
-			  "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47"
-			  "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
-			  "\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
-		.alen	= 32,
-		.ilen	= 0,
-		.result	= "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
-			  "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b",
-		.rlen	= 16,
+		.key	= "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
+			  "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75"
+			  "\x53\x14\x73\x66\x8d\x88\xf6\x80",
+		.klen	= 24,
+		.iv	= "\x03\xa0\x20\x35\x26\xf2\x21\x8d"
+			  "\x50\x20\xda\xe2\x00\x00\x00\x00",
+		.assoc	= "\x5b\x9e\x13\x67\x02\x5e\xef\xc1"
+			  "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47"
+			  "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
+			  "\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
+		.alen	= 32,
+		.result	= "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
+			  "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b",
+		.rlen	= 16,
 	}, {
 		.key	= "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42"
 		.key	= "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42"
 			  "\xef\x7a\xd3\xce\xfc\x84\x60\x62"
 			  "\xef\x7a\xd3\xce\xfc\x84\x60\x62"
-			  "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01"
-			  "\xd6\x3c\x8c",
-		.klen	= 27,
-		.iv	= "\x86\x84\xb6\xcd\xef\x09\x2e\x94",
+			  "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01",
+		.klen	= 24,
+		.iv	= "\x03\xd6\x3c\x8c\x86\x84\xb6\xcd"
+			  "\xef\x09\x2e\x94\x00\x00\x00\x00",
 		.assoc	= "\x02\x65\x78\x3c\xe9\x21\x30\x91"
 		.assoc	= "\x02\x65\x78\x3c\xe9\x21\x30\x91"
 			  "\xb1\xb9\xda\x76\x9a\x78\x6d\x95"
 			  "\xb1\xb9\xda\x76\x9a\x78\x6d\x95"
 			  "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c"
 			  "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c"
@@ -21788,10 +21732,10 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
 		.key	= "\xe0\x8d\x99\x71\x60\xd7\x97\x1a"
 		.key	= "\xe0\x8d\x99\x71\x60\xd7\x97\x1a"
 			  "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a"
 			  "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a"
 			  "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e"
 			  "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e"
-			  "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b"
-			  "\x1e\x29\x91",
-		.klen	= 35,
-		.iv	= "\xad\x8e\xc1\x53\x0a\xcf\x2d\xbe",
+			  "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b",
+		.klen	= 32,
+		.iv	= "\x03\x1e\x29\x91\xad\x8e\xc1\x53"
+			  "\x0a\xcf\x2d\xbe\x00\x00\x00\x00",
 		.assoc	= "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b"
 		.assoc	= "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b"
 			  "\x78\x2b\x94\x02\x29\x0f\x42\x27"
 			  "\x78\x2b\x94\x02\x29\x0f\x42\x27"
 			  "\x6b\x75\xcb\x98\x34\x08\x7e\x79"
 			  "\x6b\x75\xcb\x98\x34\x08\x7e\x79"
@@ -21812,10 +21756,10 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
 		.key	= "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c"
 		.key	= "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c"
 			  "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
 			  "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
 			  "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c"
 			  "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c"
-			  "\x09\x75\x9a\x9b\x3c\x9b\x27\x39"
-			  "\xf9\xd9\x4e",
-		.klen	= 35,
-		.iv	= "\x63\xb5\x3d\x9d\x43\xf6\x1e\x50",
+			  "\x09\x75\x9a\x9b\x3c\x9b\x27\x39",
+		.klen	= 32,
+		.iv	= "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d"
+			  "\x43\xf6\x1e\x50",
 		.assoc	= "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b"
 		.assoc	= "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b"
 			  "\x13\x02\x01\x0c\x83\x4c\x96\x35"
 			  "\x13\x02\x01\x0c\x83\x4c\x96\x35"
 			  "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
 			  "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
@@ -21837,10 +21781,10 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
 		.key	= "\xab\xd0\xe9\x33\x07\x26\xe5\x83"
 		.key	= "\xab\xd0\xe9\x33\x07\x26\xe5\x83"
 			  "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46"
 			  "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46"
 			  "\xf9\x8f\xad\xe3\x02\x13\x83\x77"
 			  "\xf9\x8f\xad\xe3\x02\x13\x83\x77"
-			  "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b"
-			  "\x24\xa7\x8b",
-		.klen	= 35,
-		.iv	= "\x07\xcb\xcc\x0e\xe6\x33\xbf\xf5",
+			  "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b",
+		.klen	= 32,
+		.iv	= "\x03\x24\xa7\x8b\x07\xcb\xcc\x0e"
+			  "\xe6\x33\xbf\xf5\x00\x00\x00\x00",
 		.assoc	= "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f"
 		.assoc	= "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f"
 			  "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d"
 			  "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d"
 			  "\xab\x90\x65\x8d\x8e\xca\x4d\x4f"
 			  "\xab\x90\x65\x8d\x8e\xca\x4d\x4f"
@@ -21858,16 +21802,142 @@ static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
 			  "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d"
 			  "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d"
 			  "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8",
 			  "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8",
 		.rlen	= 48,
 		.rlen	= 48,
-	},
+	}
 };
 
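The rfc4309 vectors above were rewritten as plain ccm(aes) vectors: the 3 trailing salt bytes leave .key (.klen 19/27/35 becomes 16/24/32) and .iv becomes the full 16-byte CCM counter-0 block — one flags byte (\x03 here, i.e. L - 1 = 3 for a 4-byte length field), the 3 salt bytes, the 8-byte per-packet IV, and zeroed counter bytes. A sketch of that repacking, assuming the rfc4309 key/IV layout shown in the removed lines (the function name is illustrative only):

	#include <string.h>

	/* Repack an RFC 4309 key (AES key || 3-byte salt) and 8-byte IV into
	 * the 16-byte counter block the rewritten vectors use as .iv. */
	static void rfc4309_to_ccm_iv(unsigned char ccm_iv[16],
				      const unsigned char *key, unsigned int klen,
				      const unsigned char iv[8])
	{
		ccm_iv[0] = 3;				/* flags: L - 1, 4-byte length field */
		memcpy(ccm_iv + 1, key + klen - 3, 3);	/* salt from the old key tail */
		memcpy(ccm_iv + 4, iv, 8);		/* per-packet IV */
		memset(ccm_iv + 12, 0, 4);		/* counter bytes start at zero */
	}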
-static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
-	{
+static struct aead_testvec aes_ccm_dec_tv_template[] = {
+	{ /* From RFC 3610 */
+		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+		.klen	= 16,
+		.iv	= "\x01\x00\x00\x00\x03\x02\x01\x00"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07",
+		.alen	= 8,
+		.input	= "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
+			  "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
+			  "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
+			  "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
+		.ilen	= 31,
+		.result	= "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e",
+		.rlen	= 23,
+	}, {
+		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+		.klen	= 16,
+		.iv	= "\x01\x00\x00\x00\x07\x06\x05\x04"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b",
+		.alen	= 12,
+		.input	= "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
+			  "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
+			  "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
+			  "\x7d\x9c\x2d\x93",
+		.ilen	= 28,
+		.result	= "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
+			  "\x14\x15\x16\x17\x18\x19\x1a\x1b"
+			  "\x1c\x1d\x1e\x1f",
+		.rlen	= 20,
+	}, {
+		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+		.klen	= 16,
+		.iv	= "\x01\x00\x00\x00\x0b\x0a\x09\x08"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07",
+		.alen	= 8,
+		.input	= "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
+			  "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
+			  "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
+			  "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
+			  "\x7e\x5f\x4e",
+		.ilen	= 35,
+		.result	= "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20",
+		.rlen	= 25,
+	}, {
+		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
+		.klen	= 16,
+		.iv	= "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b",
+		.alen	= 12,
+		.input	= "\x07\x34\x25\x94\x15\x77\x85\x15"
+			  "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
+			  "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
+			  "\x4d\x99\x99\x88\xdd",
+		.ilen	= 29,
+		.result	= "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
+			  "\x14\x15\x16\x17\x18\x19\x1a\x1b"
+			  "\x1c\x1d\x1e",
+		.rlen	= 19,
+	}, {
+		.key	= "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
+			  "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
+		.klen	= 16,
+		.iv	= "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
+			  "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
+		.assoc	= "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
+		.alen	= 8,
+		.input	= "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
+			  "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
+			  "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
+			  "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
+		.ilen	= 32,
+		.result	= "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
+			  "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
+			  "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
+		.rlen	= 24,
+	}, {
+		.key	= "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
+			  "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
+		.klen	= 16,
+		.iv	= "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
+			  "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
+		.assoc	= "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
+			  "\x20\xea\x60\xc0",
+		.alen	= 12,
+		.input	= "\x00\x97\x69\xec\xab\xdf\x48\x62"
+			  "\x55\x94\xc5\x92\x51\xe6\x03\x57"
+			  "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
+			  "\x5a\xe0\x70\x45\x51",
+		.ilen	= 29,
+		.result	= "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
+			  "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
+			  "\x3a\x80\x3b\xa8\x7f",
+		.rlen	= 21,
+	}, {
+		.key	= "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
+			  "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
+		.klen	= 16,
+		.iv	= "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
+			  "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
+		.assoc	= "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
+		.alen	= 8,
+		.input	= "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
+			  "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
+			  "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
+			  "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
+			  "\xba",
+		.ilen	= 33,
+		.result	= "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
+			  "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
+			  "\x98\x09\xd6\x7d\xbe\xdd\x18",
+		.rlen	= 23,
+	}, {
+		/* This is taken from FIPS CAVS. */
 		.key	= "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
-			  "\xff\x80\x2e\x48\x7d\x82\xf8\xb9"
-			  "\xc6\xfb\x7d",
-		.klen	= 19,
-		.iv	= "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8",
+			  "\xff\x80\x2e\x48\x7d\x82\xf8\xb9",
+		.klen	= 16,
+		.iv	= "\x03\xc6\xfb\x7d\x80\x0d\x13\xab"
+			  "\xd8\xa6\xb2\xd8\x00\x00\x00\x00",
 		.alen	= 0,
 		.input	= "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b",
 		.ilen	= 8,
@@ -21876,10 +21946,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
 		.novrfy	= 1,
 	}, {
 		.key	= "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
-			  "\xff\x80\x2e\x48\x7d\x82\xf8\xb9"
-			  "\xaf\x94\x87",
-		.klen	= 19,
-		.iv	= "\x78\x35\x82\x81\x7f\x88\x94\x68",
+			  "\xff\x80\x2e\x48\x7d\x82\xf8\xb9",
+		.klen	= 16,
+		.iv	= "\x03\xaf\x94\x87\x78\x35\x82\x81"
+			  "\x7f\x88\x94\x68\x00\x00\x00\x00",
 		.alen	= 0,
 		.input	= "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3",
 		.ilen	= 8,
@@ -21887,10 +21957,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
 		.rlen	= 0,
 	}, {
 		.key	= "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
-			  "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8"
-			  "\xc6\xfb\x7d",
-		.klen	= 19,
-		.iv	= "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8",
+			  "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8",
+		.klen	= 16,
+		.iv	= "\x03\xc6\xfb\x7d\x80\x0d\x13\xab"
+			  "\xd8\xa6\xb2\xd8\x00\x00\x00\x00",
 		.assoc	= "\xf3\x94\x87\x78\x35\x82\x81\x7f"
 			  "\x88\x94\x68\xb1\x78\x6b\x2b\xd6"
 			  "\x04\x1f\x4e\xed\x78\xd5\x33\x66"
@@ -21911,10 +21981,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
 		.novrfy	= 1,
 	}, {
 		.key	= "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
-			  "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8"
-			  "\x05\xe0\xc9",
-		.klen	= 19,
-		.iv	= "\x0f\xed\x34\xea\x97\xd4\x3b\xdf",
+			  "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8",
+		.klen	= 16,
+		.iv	= "\x03\x05\xe0\xc9\x0f\xed\x34\xea"
+			  "\x97\xd4\x3b\xdf\x00\x00\x00\x00",
 		.assoc	= "\x49\x5c\x50\x1f\x1d\x94\xcc\x81"
 			  "\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1"
 			  "\xd8\x5c\x42\x68\xe0\x6c\xda\x89"
@@ -21935,10 +22005,10 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
 	}, {
 		.key	= "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
 			  "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
-			  "\xa4\x48\x93\x39\x26\x71\x4a\xc6"
-			  "\xee\x49\x83",
-		.klen	= 27,
-		.iv	= "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
+			  "\xa4\x48\x93\x39\x26\x71\x4a\xc6",
+		.klen	= 24,
+		.iv	= "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+			  "\x57\xba\xfd\x9e\x00\x00\x00\x00",
 		.assoc	= "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
 			  "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
 			  "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
@@ -21949,114 +22019,1348 @@ static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
 		.result	= "\x00",
 		.rlen	= 0,
 	}, {
-		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
-			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
-			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-			  "\xee\x49\x83",
+		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
+			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba",
+		.klen	= 24,
+		.iv	= "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+			  "\x57\xba\xfd\x9e\x00\x00\x00\x00",
+		.assoc	= "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
+			  "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
+			  "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
+			  "\x11\xc4\xc6\xdb\x00\x56\x36\x61",
+		.alen	= 32,
+		.input	= "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7"
+			  "\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
+			  "\x66\xca\x61\x1e\x96\x7a\x61\xb3"
+			  "\x1c\x16\x45\x52\xba\x04\x9c\x9f"
+			  "\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1",
+		.ilen	= 40,
+		.result	= "\x85\x34\x66\x42\xc8\x92\x0f\x36"
+			  "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
+			  "\x0a\x85\xcc\x02\xad\x7a\x96\xe9"
+			  "\x65\x43\xa4\xc3\x0f\xdc\x55\x81",
+		.rlen	= 32,
+	}, {
+		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
+			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba",
+		.klen	= 24,
+		.iv	= "\x03\xd1\xfc\x57\x9c\xfe\xb8\x9c"
+			  "\xad\x71\xaa\x1f\x00\x00\x00\x00",
+		.assoc	= "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6"
+			  "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3"
+			  "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7"
+			  "\x3c\xa1\x52\x13\x03\x8a\x23\x3a",
+		.alen	= 32,
+		.input	= "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00"
+			  "\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37"
+			  "\x8c\x26\x33\xc6\xb2\xa2\x17\xfa"
+			  "\x64\x19\xc0\x30\xd7\xfc\x14\x6b"
+			  "\xe3\x33\xc2\x04\xb0\x37\xbe\x3f"
+			  "\xa9\xb4\x2d\x68\x03\xa3\x44\xef",
+		.ilen	= 48,
+		.result	= "\x02\x87\x4d\x28\x80\x6e\xb2\xed"
+			  "\x99\x2a\xa8\xca\x04\x25\x45\x90"
+			  "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c"
+			  "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b",
+		.rlen	= 32,
+		.novrfy	= 1,
+	}, {
+		.key	= "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01"
+			  "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c"
+			  "\x20\x2c\xad\x30\xc2\x2b\x41\xfb"
+			  "\x0e\x85\xbc\x33\xad\x0f\x2b\xff",
+		.klen	= 32,
+		.iv	= "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+			  "\x57\xba\xfd\x9e\x00\x00\x00\x00",
+		.alen	= 0,
+		.input	= "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2",
+		.ilen	= 8,
+		.result	= "\x00",
+		.rlen	= 0,
+	}, {
+		.key	= "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
+			  "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
+			  "\xa4\x48\x93\x39\x26\x71\x4a\xc6"
+			  "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb",
+		.klen	= 32,
+		.iv	= "\x03\x85\x34\x66\x42\xc8\x92\x0f"
+			  "\x36\x58\xe0\x6b\x00\x00\x00\x00",
+		.alen	= 0,
+		.input	= "\x48\x01\x5e\x02\x24\x04\x66\x47"
+			  "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd"
+			  "\xa5\xa9\x87\x8d\x84\xee\x2e\x77"
+			  "\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6"
+			  "\x72\xc3\x8e\xf7\x70\xb1\xb2\x07"
+			  "\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a",
+		.ilen	= 48,
+		.result	= "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c"
+			  "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99"
+			  "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39"
+			  "\x89\xd4\x75\x7a\x63\xb1\xda\x93",
+		.rlen	= 32,
+		.novrfy	= 1,
+	}, {
+		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
+			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
+			  "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b",
+		.klen	= 32,
+		.iv	= "\x03\xcf\x76\x3f\xd9\x95\x75\x8f"
+			  "\x44\x89\x40\x7b\x00\x00\x00\x00",
+		.assoc	= "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88"
+			  "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b"
+			  "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
+			  "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe",
+		.alen	= 32,
+		.input	= "\x48\x58\xd6\xf3\xad\x63\x58\xbf"
+			  "\xae\xc7\x5e\xae\x83\x8f\x7b\xe4"
+			  "\x78\x5c\x4c\x67\x71\x89\x94\xbf"
+			  "\x47\xf1\x63\x7e\x1c\x59\xbd\xc5"
+			  "\x7f\x44\x0a\x0c\x01\x18\x07\x92"
+			  "\xe1\xd3\x51\xce\x32\x6d\x0c\x5b",
+		.ilen	= 48,
+		.result	= "\xc2\x54\xc8\xde\x78\x87\x77\x40"
+			  "\x49\x71\xe4\xb7\xe7\xcb\x76\x61"
+			  "\x0a\x41\xb9\xe9\xc0\x76\x54\xab"
+			  "\x04\x49\x3b\x19\x93\x57\x25\x5d",
+		.rlen	= 32,
+	},
+};
+
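
A note on the .novrfy flags in the decryption template above: they mark vectors whose authentication tag is deliberately corrupt, so the cipher must reject them instead of emitting plaintext. The fragment below is a minimal user-space sketch of that convention, not kernel code: the struct merely mirrors the aead_testvec fields shown in this file, and aead_decrypt_stub() is a hypothetical placeholder for the cipher under test, assumed to return -EBADMSG on a bad tag.

#include <errno.h>

struct toy_aead_vec {
	const char	*input;		/* ciphertext followed by the tag */
	unsigned int	ilen;
	int		novrfy;		/* nonzero: expect verification to fail */
};

/* hypothetical cipher under test; assumed to return -EBADMSG on a bad tag */
extern int aead_decrypt_stub(const char *in, unsigned int ilen);

static int check_one(const struct toy_aead_vec *v)
{
	int err = aead_decrypt_stub(v->input, v->ilen);

	if (v->novrfy)
		return err == -EBADMSG ? 0 : -1;	/* rejection is the pass */
	return err ? -1 : 0;
}

In other words, for every .novrfy = 1 entry above, a decryption that "succeeds" is itself a test failure.
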
+/*
+ * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all
+ * use a 13-byte nonce, whereas we only support an 11-byte nonce.  Worse,
+ * they use AD lengths which are not valid ESP header lengths.
+ *
+ * These vectors are therefore copied/generated from the rfc4106 ones,
+ * with the key truncated by one byte.  (A sketch of the resulting key
+ * and IV layout follows the encryption template below.)
+ */
+static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
+	{ /* Generated using Crypto++ */
+		.key	= zeroed_string,
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.input	= zeroed_string,
+		.ilen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
+		.result	= "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
+			  "\x12\x50\xE8\xDE\x81\x3C\x63\x08"
+			  "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5"
+			  "\x27\x50\x01\xAC\x03\x33\x39\xFB",
+		.rlen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= zeroed_string,
+		.ilen	= 16,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.result	= "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
+			  "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17"
+			  "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA"
+			  "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0",
+		.rlen	= 32,
+
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
+		.result	= "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+			  "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+			  "\xA1\xE2\xC2\x42\x2B\x81\x70\x40"
+			  "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C",
+		.rlen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 16,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
+		.result	= "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+			  "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+			  "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9"
+			  "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E",
+		.rlen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 16,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.result	= "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+			  "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+			  "\x43\x8E\x76\x57\x3B\xB4\x05\xE8"
+			  "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED",
+		.rlen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 64,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.result	= "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+			  "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+			  "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4"
+			  "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02"
+			  "\x9B\xAB\x39\x18\xEB\x94\x34\x36"
+			  "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49"
+			  "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E"
+			  "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB"
+			  "\x02\x73\xDD\xE7\x30\x4A\x30\x54"
+			  "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F",
+		.rlen	= 80,
+	}, {
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x45\x67\x89\xab\xcd\xef",
+		.input	= "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff",
+		.ilen	= 192,
+		.assoc	= "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+			  "\x89\xab\xcd\xef",
+		.alen	= 20,
+		.result	= "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
+			  "\x7C\x64\x6D\x33\x46\x77\xAC\xB1"
+			  "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95"
+			  "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C"
+			  "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB"
+			  "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01"
+			  "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04"
+			  "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B"
+			  "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09"
+			  "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79"
+			  "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D"
+			  "\x22\xEB\xD1\x09\x80\x3F\x5A\x70"
+			  "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B"
+			  "\x36\x12\x00\x89\xAA\x5D\x55\xDA"
+			  "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6"
+			  "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F"
+			  "\x2B\x50\x44\x52\xC2\x10\x7D\x38"
+			  "\x82\x64\x83\x0C\xAE\x49\xD0\xE5"
+			  "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43"
+			  "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC"
+			  "\x19\x6D\x60\x66\x61\xF9\x9A\x3F"
+			  "\x75\xFC\x38\x53\x5B\xB5\xCD\x52"
+			  "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98"
+			  "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E"
+			  "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC"
+			  "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A",
+		.rlen	= 208,
+	}, { /* From draft-mcgrew-gcm-test-01 */
+		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+			  "\x2E\x44\x3B",
+		.klen	= 19,
+		.iv	= "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
+		.input	= "\x45\x00\x00\x48\x69\x9A\x00\x00"
+			  "\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
+			  "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
+			  "\x38\xD3\x01\x00\x00\x01\x00\x00"
+			  "\x00\x00\x00\x00\x04\x5F\x73\x69"
+			  "\x70\x04\x5F\x75\x64\x70\x03\x73"
+			  "\x69\x70\x09\x63\x79\x62\x65\x72"
+			  "\x63\x69\x74\x79\x02\x64\x6B\x00"
+			  "\x00\x21\x00\x01\x01\x02\x02\x01",
+		.ilen	= 72,
+		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
+			  "\x00\x00\x00\x00\x49\x56\xED\x7E"
+			  "\x3B\x24\x4C\xFE",
+		.alen	= 20,
+		.result	= "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
+			  "\x83\x60\xF5\xBA\x3A\x56\x79\xE6"
+			  "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E"
+			  "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF"
+			  "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0"
+			  "\x39\xF8\x53\x6D\x31\x22\x2B\xBF"
+			  "\x98\x81\xFC\x34\xEE\x85\x36\xCD"
+			  "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35"
+			  "\x18\x85\x54\xB2\xBC\xDD\x3F\x43"
+			  "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC"
+			  "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF",
+		.rlen	= 88,
+	}, {
+		.key	= "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+			  "\xCA\xFE\xBA",
+		.klen	= 19,
+		.iv	= "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.input	= "\x45\x00\x00\x3E\x69\x8F\x00\x00"
+			  "\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
+			  "\xC0\xA8\x01\x01\x0A\x98\x00\x35"
+			  "\x00\x2A\x23\x43\xB2\xD0\x01\x00"
+			  "\x00\x01\x00\x00\x00\x00\x00\x00"
+			  "\x03\x73\x69\x70\x09\x63\x79\x62"
+			  "\x65\x72\x63\x69\x74\x79\x02\x64"
+			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
+		.ilen	= 64,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
+		.result	= "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
+			  "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16"
+			  "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF"
+			  "\xF5\x71\xB6\x37\x84\xA7\xB1\x99"
+			  "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D"
+			  "\xEF\x25\x88\x1F\x1D\x77\x11\xFF"
+			  "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B"
+			  "\x00\x62\x1F\x68\x4E\x7C\xA0\x97"
+			  "\x10\x72\x7E\x53\x13\x3B\x68\xE4"
+			  "\x30\x99\x91\x79\x09\xEA\xFF\x6A",
+		.rlen	= 80,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x11\x22\x33",
+		.klen	= 35,
+		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.input	= "\x45\x00\x00\x30\x69\xA6\x40\x00"
+			  "\x80\x06\x26\x90\xC0\xA8\x01\x02"
+			  "\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
+			  "\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
+			  "\x70\x02\x40\x00\x20\xBF\x00\x00"
+			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
+			  "\x01\x02\x02\x01",
+		.ilen	= 52,
+		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+			  "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.alen	= 16,
+		.result	= "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
+			  "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4"
+			  "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD"
+			  "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55"
+			  "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10"
+			  "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B"
+			  "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92"
+			  "\x23\xA6\x10\xB0\x26\xD6\xD9\x26"
+			  "\x5A\x48\x6A\x3E",
+		.rlen	= 68,
+	}, {
+		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x45\x00\x00\x3C\x99\xC5\x00\x00"
+			  "\x80\x01\xCB\x7A\x40\x67\x93\x18"
+			  "\x01\x01\x01\x01\x08\x00\x07\x5C"
+			  "\x02\x00\x44\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x75\x76\x77\x61\x62\x63\x64\x65"
+			  "\x66\x67\x68\x69\x01\x02\x02\x01",
+		.ilen	= 64,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
+		.result	= "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
+			  "\x92\x51\x23\xA4\xC1\x5B\xF0\x10"
+			  "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC"
+			  "\x89\xC8\xF8\x42\x62\x95\xB7\xCB"
+			  "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7"
+			  "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C"
+			  "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9"
+			  "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE"
+			  "\x36\x25\xC1\x10\x12\x1C\xCA\x82"
+			  "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A",
+		.rlen	= 80,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.input	= "\x45\x00\x00\x3C\x99\xC3\x00\x00"
+			  "\x80\x01\xCB\x7C\x40\x67\x93\x18"
+			  "\x01\x01\x01\x01\x08\x00\x08\x5C"
+			  "\x02\x00\x43\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x75\x76\x77\x61\x62\x63\x64\x65"
+			  "\x66\x67\x68\x69\x01\x02\x02\x01",
+		.ilen	= 64,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.result	= "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
+			  "\x10\x60\x40\x62\x6B\x4F\x97\x8E"
+			  "\x0B\xB2\x22\x97\xCB\x21\xE0\x90"
+			  "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B"
+			  "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+			  "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+			  "\x30\xB8\xE5\xDF\xD7\x12\x56\x75"
+			  "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD"
+			  "\x97\x57\xCA\xC1\x20\xD0\x86\xB9"
+			  "\x66\x9D\xB4\x2B\x96\x22\xAC\x67",
+		.rlen	= 80,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.input	= "\x45\x00\x00\x1C\x42\xA2\x00\x00"
+			  "\x80\x01\x44\x1F\x40\x67\x93\xB6"
+			  "\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
+			  "\x01\x02\x02\x01",
+		.ilen	= 28,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.result	= "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
+			  "\x10\x60\xCF\x01\x6B\x4F\x97\x20"
+			  "\xEA\xB3\x23\x94\xC9\x21\x1D\x33"
+			  "\xA1\xE5\x90\x40\x05\x37\x45\x70"
+			  "\xB5\xD6\x09\x0A\x23\x73\x33\xF9"
+			  "\x08\xB4\x22\xE4",
+		.rlen	= 44,
+	}, {
+		.key	= "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+			  "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\xCA\xFE\xBA",
+		.klen	= 27,
+		.iv	= "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.input	= "\x45\x00\x00\x28\xA4\xAD\x40\x00"
+			  "\x40\x06\x78\x80\x0A\x01\x03\x8F"
+			  "\x0A\x01\x06\x12\x80\x23\x06\xB8"
+			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
+			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
+		.ilen	= 40,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
+		.result	= "\x05\x22\x15\xD1\x52\x56\x85\x04"
+			  "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA"
+			  "\xEA\x16\x37\x50\xF3\xDF\x84\x3B"
+			  "\x2F\x32\x18\x57\x34\x2A\x8C\x23"
+			  "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB"
+			  "\x34\xA5\x9F\x6C\x48\x30\x1E\x22"
+			  "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B",
+		.rlen	= 56,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xDE\xCA\xF8",
+		.klen	= 19,
+		.iv	= "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+		.input	= "\x45\x00\x00\x49\x33\xBA\x00\x00"
+			  "\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
+			  "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+			  "\x00\x35\xDD\x7B\x80\x03\x02\xD5"
+			  "\x00\x00\x4E\x20\x00\x1E\x8C\x18"
+			  "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47"
+			  "\x6B\x91\xB9\x24\xB2\x80\x38\x9D"
+			  "\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
+			  "\x9B\x62\x66\xC0\x47\x22\xB1\x49"
+			  "\x23\x01\x01\x01",
+		.ilen	= 76,
+		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
+		.result	= "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
+			  "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90"
+			  "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76"
+			  "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00"
+			  "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA"
+			  "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7"
+			  "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6"
+			  "\x63\xC9\xDB\x05\x69\x51\x12\xAD"
+			  "\x3E\x00\x32\x73\x86\xF2\xEE\xF5"
+			  "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D"
+			  "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7"
+			  "\x12\x25\x0B\xF9",
+		.rlen	= 92,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x73\x61\x6C",
+		.klen	= 35,
+		.iv	= "\x61\x6E\x64\x01\x69\x76\x65\x63",
+		.input	= "\x45\x08\x00\x28\x73\x2C\x00\x00"
+			  "\x40\x06\xE9\xF9\x0A\x01\x06\x12"
+			  "\x0A\x01\x03\x8F\x06\xB8\x80\x23"
+			  "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
+			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
+		.ilen	= 40,
+		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
+		.result	= "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
+			  "\x2C\x64\x87\x46\x1E\x34\x10\x05"
+			  "\x29\x6B\xBB\x36\xE9\x69\xAD\x92"
+			  "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D"
+			  "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA"
+			  "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7"
+			  "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D",
+		.rlen	= 56,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.input	= "\x45\x00\x00\x49\x33\x3E\x00\x00"
+			  "\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
+			  "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+			  "\x00\x35\xCB\x45\x80\x03\x02\x5B"
+			  "\x00\x00\x01\xE0\x00\x1E\x8C\x18"
+			  "\xD6\x57\x59\xD5\x22\x84\xA0\x35"
+			  "\x2C\x71\x47\x5C\x88\x80\x39\x1C"
+			  "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
+			  "\x5A\xE2\x70\xC0\x38\x99\x49\x39"
+			  "\x15\x01\x01\x01",
+		.ilen	= 76,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.result	= "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
+			  "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86"
+			  "\xC8\x02\xF0\xB0\x03\x09\xD9\x02"
+			  "\xA0\xD2\x59\x04\xD1\x85\x2A\x24"
+			  "\x1C\x67\x3E\xD8\x68\x72\x06\x94"
+			  "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B"
+			  "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C"
+			  "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE"
+			  "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5"
+			  "\x23\xF4\x84\x40\x74\x14\x8A\x6B"
+			  "\xDB\xD7\x67\xED\xA4\x93\xF3\x47"
+			  "\xCC\xF7\x46\x6F",
+		.rlen	= 92,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x73\x61\x6C",
+		.klen	= 35,
+		.iv	= "\x61\x6E\x64\x01\x69\x76\x65\x63",
+		.input	= "\x63\x69\x73\x63\x6F\x01\x72\x75"
+			  "\x6C\x65\x73\x01\x74\x68\x65\x01"
+			  "\x6E\x65\x74\x77\x65\x01\x64\x65"
+			  "\x66\x69\x6E\x65\x01\x74\x68\x65"
+			  "\x74\x65\x63\x68\x6E\x6F\x6C\x6F"
+			  "\x67\x69\x65\x73\x01\x74\x68\x61"
+			  "\x74\x77\x69\x6C\x6C\x01\x64\x65"
+			  "\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
+			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
+		.ilen	= 72,
+		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
+		.result	= "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
+			  "\x00\x07\x1D\xBE\x60\x5D\x73\x16"
+			  "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4"
+			  "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A"
+			  "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4"
+			  "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5"
+			  "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6"
+			  "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C"
+			  "\x73\xF4\xE7\x12\x84\x4C\x37\x0A"
+			  "\x4A\x8F\x06\x37\x48\xF9\xF9\x05"
+			  "\x55\x13\x40\xC3\xD5\x55\x3A\x3D",
+		.rlen	= 88,
+	}, {
+		.key	= "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
+			  "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
+			  "\xD9\x66\x42",
+		.klen	= 19,
+		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.input	= "\x01\x02\x02\x01",
+		.ilen	= 4,
+		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+			  "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.alen	= 16,
+		.result	= "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
+			  "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90"
+			  "\xF7\x61\x24\x62",
+		.rlen	= 20,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xDE\xCA\xF8",
+		.klen	= 19,
+		.iv	= "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+		.input	= "\x74\x6F\x01\x62\x65\x01\x6F\x72"
+			  "\x01\x6E\x6F\x74\x01\x74\x6F\x01"
+			  "\x62\x65\x00\x01",
+		.ilen	= 20,
+		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
+		.result	= "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
+			  "\x03\x9B\x84\xFC\x44\x8C\xBB\x81"
+			  "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0"
+			  "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC"
+			  "\x17\x17\x65\xAD",
+		.rlen	= 36,
+	}, {
+		.key	= "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
+			  "\x6D\x61\x72\x69\x6A\x75\x61\x6E"
+			  "\x61\x61\x6E\x64\x64\x6F\x69\x74"
+			  "\x62\x65\x66\x6F\x72\x65\x69\x61"
+			  "\x74\x75\x72",
+		.klen	= 35,
+		.iv	= "\x33\x30\x21\x69\x67\x65\x74\x6D",
+		.input	= "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+			  "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+			  "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+			  "\x02\x00\x07\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x01\x02\x02\x01",
+		.ilen	= 52,
+		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
+			  "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+			  "\x67\x65\x74\x6D",
+		.alen	= 20,
+		.result	= "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
+			  "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A"
+			  "\x48\x59\x80\x18\x1A\x18\x1A\x04"
+			  "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75"
+			  "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF"
+			  "\x16\xC3\x35\xA8\xE7\xCE\x84\x04"
+			  "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42"
+			  "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B"
+			  "\x39\xDB\xC8\xDC",
+		.rlen	= 68,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.input	= "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+			  "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+			  "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+			  "\x02\x00\x07\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x01\x02\x02\x01",
+		.ilen	= 52,
+		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.result	= "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
+			  "\x10\x60\x54\x25\xEB\x80\x04\x93"
+			  "\xCA\x1B\x23\x97\xCB\x21\x2E\x01"
+			  "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B"
+			  "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+			  "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+			  "\x44\xCC\x90\xBF\x00\x94\x94\x92"
+			  "\x20\x17\x0C\x1B\x55\xDE\x7E\x68"
+			  "\xF4\x95\x5D\x4F",
+		.rlen	= 68,
+	}, {
+		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+			  "\x22\x43\x3C",
+		.klen	= 19,
+		.iv	= "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
+		.input	= "\x08\x00\xC6\xCD\x02\x00\x07\x00"
+			  "\x61\x62\x63\x64\x65\x66\x67\x68"
+			  "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
+			  "\x71\x72\x73\x74\x01\x02\x02\x01",
+		.ilen	= 32,
+		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
+			  "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+			  "\x3A\x23\x4B\xFD",
+		.alen	= 20,
+		.result	= "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
+			  "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F"
+			  "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF"
+			  "\x58\x7D\xCF\x29\xA3\x41\x68\x6B"
+			  "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F"
+			  "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6",
+		.rlen	= 48,
+	}
+};
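
As the comment before this template notes, the 19-, 27- and 35-byte klen values pack a 3-byte salt behind a 16-, 24- or 32-byte AES key, while the hunks near the top of this section show the inverse move for the plain CCM templates: the salt is folded into a 16-byte counter-block-style .iv (a 0x03 length octet, the salt, the 8-byte per-packet IV, then zero padding) and klen drops back to the bare AES key length. A minimal sketch of both halves of that layout, using helper names of our own invention rather than kernel APIs:

#include <string.h>

/* split an rfc4309-style key blob: AES key || 3-byte salt */
static int rfc4309_split_key(const unsigned char *blob, unsigned int klen,
			     unsigned int *aes_klen, unsigned char salt[3])
{
	if (klen != 19 && klen != 27 && klen != 35)
		return -1;
	*aes_klen = klen - 3;			/* 16, 24 or 32 */
	memcpy(salt, blob + *aes_klen, 3);
	return 0;
}

/* assemble the 16-byte CCM .iv string: 0x03 || salt || 8-byte IV || zeroes */
static void rfc4309_build_iv(unsigned char out[16],
			     const unsigned char salt[3],
			     const unsigned char iv[8])
{
	memset(out, 0, 16);
	out[0] = 3;		/* length-field size minus one: 11-byte nonce */
	memcpy(out + 1, salt, 3);
	memcpy(out + 4, iv, 8);
}

Cross-checking against the first hunk of this section: the old trailing key bytes \x1e\x29\x91 and the old 8-byte .iv \xad\x8e\xc1\x53\x0a\xcf\x2d\xbe reassemble into exactly the new 16-byte .iv "\x03\x1e\x29\x91\xad\x8e\xc1\x53\x0a\xcf\x2d\xbe\x00\x00\x00\x00".
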
+
+static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
+	{ /* Generated using Crypto++ */
+		.key	= zeroed_string,
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.result	= zeroed_string,
+		.rlen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
+		.input	= "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
+			  "\x12\x50\xE8\xDE\x81\x3C\x63\x08"
+			  "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5"
+			  "\x27\x50\x01\xAC\x03\x33\x39\xFB",
+		.ilen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.result	= zeroed_string,
+		.rlen	= 16,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.input	= "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
+			  "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17"
+			  "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA"
+			  "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0",
+		.ilen	= 32,
+
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.result	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
+		.input	= "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+			  "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+			  "\xA1\xE2\xC2\x42\x2B\x81\x70\x40"
+			  "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C",
+		.ilen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.result	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen	= 16,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
+		.input	= "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+			  "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+			  "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9"
+			  "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E",
+		.ilen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.result	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen	= 16,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.input	= "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+			  "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+			  "\x43\x8E\x76\x57\x3B\xB4\x05\xE8"
+			  "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED",
+		.ilen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.result	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen	= 64,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.input	= "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+			  "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+			  "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4"
+			  "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02"
+			  "\x9B\xAB\x39\x18\xEB\x94\x34\x36"
+			  "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49"
+			  "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E"
+			  "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB"
+			  "\x02\x73\xDD\xE7\x30\x4A\x30\x54"
+			  "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F",
+		.ilen	= 80,
+	}, {
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x45\x67\x89\xab\xcd\xef",
+		.result	= "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff",
+		.rlen	= 192,
+		.assoc	= "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+			  "\x89\xab\xcd\xef",
+		.alen	= 20,
+		.input	= "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
+			  "\x7C\x64\x6D\x33\x46\x77\xAC\xB1"
+			  "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95"
+			  "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C"
+			  "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB"
+			  "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01"
+			  "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04"
+			  "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B"
+			  "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09"
+			  "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79"
+			  "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D"
+			  "\x22\xEB\xD1\x09\x80\x3F\x5A\x70"
+			  "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B"
+			  "\x36\x12\x00\x89\xAA\x5D\x55\xDA"
+			  "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6"
+			  "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F"
+			  "\x2B\x50\x44\x52\xC2\x10\x7D\x38"
+			  "\x82\x64\x83\x0C\xAE\x49\xD0\xE5"
+			  "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43"
+			  "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC"
+			  "\x19\x6D\x60\x66\x61\xF9\x9A\x3F"
+			  "\x75\xFC\x38\x53\x5B\xB5\xCD\x52"
+			  "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98"
+			  "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E"
+			  "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC"
+			  "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A",
+		.ilen	= 208,
+	}, { /* From draft-mcgrew-gcm-test-01 */
+		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+			  "\x2E\x44\x3B",
+		.klen	= 19,
+		.iv	= "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
+		.result	= "\x45\x00\x00\x48\x69\x9A\x00\x00"
+			  "\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
+			  "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
+			  "\x38\xD3\x01\x00\x00\x01\x00\x00"
+			  "\x00\x00\x00\x00\x04\x5F\x73\x69"
+			  "\x70\x04\x5F\x75\x64\x70\x03\x73"
+			  "\x69\x70\x09\x63\x79\x62\x65\x72"
+			  "\x63\x69\x74\x79\x02\x64\x6B\x00"
+			  "\x00\x21\x00\x01\x01\x02\x02\x01",
+		.rlen	= 72,
+		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
+			  "\x00\x00\x00\x00\x49\x56\xED\x7E"
+			  "\x3B\x24\x4C\xFE",
+		.alen	= 20,
+		.input	= "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
+			  "\x83\x60\xF5\xBA\x3A\x56\x79\xE6"
+			  "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E"
+			  "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF"
+			  "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0"
+			  "\x39\xF8\x53\x6D\x31\x22\x2B\xBF"
+			  "\x98\x81\xFC\x34\xEE\x85\x36\xCD"
+			  "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35"
+			  "\x18\x85\x54\xB2\xBC\xDD\x3F\x43"
+			  "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC"
+			  "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF",
+		.ilen	= 88,
+	}, {
+		.key	= "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+			  "\xCA\xFE\xBA",
+		.klen	= 19,
+		.iv	= "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.result	= "\x45\x00\x00\x3E\x69\x8F\x00\x00"
+			  "\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
+			  "\xC0\xA8\x01\x01\x0A\x98\x00\x35"
+			  "\x00\x2A\x23\x43\xB2\xD0\x01\x00"
+			  "\x00\x01\x00\x00\x00\x00\x00\x00"
+			  "\x03\x73\x69\x70\x09\x63\x79\x62"
+			  "\x65\x72\x63\x69\x74\x79\x02\x64"
+			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
+		.rlen	= 64,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
+		.input	= "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
+			  "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16"
+			  "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF"
+			  "\xF5\x71\xB6\x37\x84\xA7\xB1\x99"
+			  "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D"
+			  "\xEF\x25\x88\x1F\x1D\x77\x11\xFF"
+			  "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B"
+			  "\x00\x62\x1F\x68\x4E\x7C\xA0\x97"
+			  "\x10\x72\x7E\x53\x13\x3B\x68\xE4"
+			  "\x30\x99\x91\x79\x09\xEA\xFF\x6A",
+		.ilen	= 80,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x11\x22\x33",
+		.klen	= 35,
+		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.result	= "\x45\x00\x00\x30\x69\xA6\x40\x00"
+			  "\x80\x06\x26\x90\xC0\xA8\x01\x02"
+			  "\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
+			  "\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
+			  "\x70\x02\x40\x00\x20\xBF\x00\x00"
+			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
+			  "\x01\x02\x02\x01",
+		.rlen	= 52,
+		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+			  "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.alen	= 16,
+		.input	= "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
+			  "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4"
+			  "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD"
+			  "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55"
+			  "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10"
+			  "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B"
+			  "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92"
+			  "\x23\xA6\x10\xB0\x26\xD6\xD9\x26"
+			  "\x5A\x48\x6A\x3E",
+		.ilen	= 68,
+	}, {
+		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.result	= "\x45\x00\x00\x3C\x99\xC5\x00\x00"
+			  "\x80\x01\xCB\x7A\x40\x67\x93\x18"
+			  "\x01\x01\x01\x01\x08\x00\x07\x5C"
+			  "\x02\x00\x44\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x75\x76\x77\x61\x62\x63\x64\x65"
+			  "\x66\x67\x68\x69\x01\x02\x02\x01",
+		.rlen	= 64,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
+		.input	= "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
+			  "\x92\x51\x23\xA4\xC1\x5B\xF0\x10"
+			  "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC"
+			  "\x89\xC8\xF8\x42\x62\x95\xB7\xCB"
+			  "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7"
+			  "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C"
+			  "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9"
+			  "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE"
+			  "\x36\x25\xC1\x10\x12\x1C\xCA\x82"
+			  "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A",
+		.ilen	= 80,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.result	= "\x45\x00\x00\x3C\x99\xC3\x00\x00"
+			  "\x80\x01\xCB\x7C\x40\x67\x93\x18"
+			  "\x01\x01\x01\x01\x08\x00\x08\x5C"
+			  "\x02\x00\x43\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x75\x76\x77\x61\x62\x63\x64\x65"
+			  "\x66\x67\x68\x69\x01\x02\x02\x01",
+		.rlen	= 64,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.input	= "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
+			  "\x10\x60\x40\x62\x6B\x4F\x97\x8E"
+			  "\x0B\xB2\x22\x97\xCB\x21\xE0\x90"
+			  "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B"
+			  "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+			  "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+			  "\x30\xB8\xE5\xDF\xD7\x12\x56\x75"
+			  "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD"
+			  "\x97\x57\xCA\xC1\x20\xD0\x86\xB9"
+			  "\x66\x9D\xB4\x2B\x96\x22\xAC\x67",
+		.ilen	= 80,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.result	= "\x45\x00\x00\x1C\x42\xA2\x00\x00"
+			  "\x80\x01\x44\x1F\x40\x67\x93\xB6"
+			  "\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
+			  "\x01\x02\x02\x01",
+		.rlen	= 28,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.input	= "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
+			  "\x10\x60\xCF\x01\x6B\x4F\x97\x20"
+			  "\xEA\xB3\x23\x94\xC9\x21\x1D\x33"
+			  "\xA1\xE5\x90\x40\x05\x37\x45\x70"
+			  "\xB5\xD6\x09\x0A\x23\x73\x33\xF9"
+			  "\x08\xB4\x22\xE4",
+		.ilen	= 44,
+	}, {
+		.key	= "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+			  "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\xCA\xFE\xBA",
 		.klen	= 27,
-		.iv	= "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
-		.assoc	= "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
-			  "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
-			  "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
-			  "\x11\xc4\xc6\xdb\x00\x56\x36\x61",
-		.alen	= 32,
-		.input	= "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7"
-			  "\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
-			  "\x66\xca\x61\x1e\x96\x7a\x61\xb3"
-			  "\x1c\x16\x45\x52\xba\x04\x9c\x9f"
-			  "\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1",
-		.ilen	= 40,
-		.result	= "\x85\x34\x66\x42\xc8\x92\x0f\x36"
-			  "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
-			  "\x0a\x85\xcc\x02\xad\x7a\x96\xe9"
-			  "\x65\x43\xa4\xc3\x0f\xdc\x55\x81",
-		.rlen	= 32,
+		.iv	= "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.result	= "\x45\x00\x00\x28\xA4\xAD\x40\x00"
+			  "\x40\x06\x78\x80\x0A\x01\x03\x8F"
+			  "\x0A\x01\x06\x12\x80\x23\x06\xB8"
+			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
+			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
+		.rlen	= 40,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
+		.input	= "\x05\x22\x15\xD1\x52\x56\x85\x04"
+			  "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA"
+			  "\xEA\x16\x37\x50\xF3\xDF\x84\x3B"
+			  "\x2F\x32\x18\x57\x34\x2A\x8C\x23"
+			  "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB"
+			  "\x34\xA5\x9F\x6C\x48\x30\x1E\x22"
+			  "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B",
+		.ilen	= 56,
 	}, {
-		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
-			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
-			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-			  "\xd1\xfc\x57",
-		.klen	= 27,
-		.iv	= "\x9c\xfe\xb8\x9c\xad\x71\xaa\x1f",
-		.assoc	= "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6"
-			  "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3"
-			  "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7"
-			  "\x3c\xa1\x52\x13\x03\x8a\x23\x3a",
-		.alen	= 32,
-		.input	= "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00"
-			  "\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37"
-			  "\x8c\x26\x33\xc6\xb2\xa2\x17\xfa"
-			  "\x64\x19\xc0\x30\xd7\xfc\x14\x6b"
-			  "\xe3\x33\xc2\x04\xb0\x37\xbe\x3f"
-			  "\xa9\xb4\x2d\x68\x03\xa3\x44\xef",
-		.ilen	= 48,
-		.result	= "\x02\x87\x4d\x28\x80\x6e\xb2\xed"
-			  "\x99\x2a\xa8\xca\x04\x25\x45\x90"
-			  "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c"
-			  "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b",
-		.rlen	= 32,
-		.novrfy	= 1,
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xDE\xCA\xF8",
+		.klen	= 19,
+		.iv	= "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+		.result	= "\x45\x00\x00\x49\x33\xBA\x00\x00"
+			  "\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
+			  "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+			  "\x00\x35\xDD\x7B\x80\x03\x02\xD5"
+			  "\x00\x00\x4E\x20\x00\x1E\x8C\x18"
+			  "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47"
+			  "\x6B\x91\xB9\x24\xB2\x80\x38\x9D"
+			  "\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
+			  "\x9B\x62\x66\xC0\x47\x22\xB1\x49"
+			  "\x23\x01\x01\x01",
+		.rlen	= 76,
+		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
+		.input	= "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
+			  "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90"
+			  "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76"
+			  "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00"
+			  "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA"
+			  "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7"
+			  "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6"
+			  "\x63\xC9\xDB\x05\x69\x51\x12\xAD"
+			  "\x3E\x00\x32\x73\x86\xF2\xEE\xF5"
+			  "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D"
+			  "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7"
+			  "\x12\x25\x0B\xF9",
+		.ilen	= 92,
 	}, {
-		.key	= "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01"
-			  "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c"
-			  "\x20\x2c\xad\x30\xc2\x2b\x41\xfb"
-			  "\x0e\x85\xbc\x33\xad\x0f\x2b\xff"
-			  "\xee\x49\x83",
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x73\x61\x6C",
 		.klen	= 35,
-		.iv	= "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
-		.alen	= 0,
-		.input	= "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2",
-		.ilen	= 8,
-		.result	= "\x00",
-		.rlen	= 0,
+		.iv	= "\x61\x6E\x64\x01\x69\x76\x65\x63",
+		.result	= "\x45\x08\x00\x28\x73\x2C\x00\x00"
+			  "\x40\x06\xE9\xF9\x0A\x01\x06\x12"
+			  "\x0A\x01\x03\x8F\x06\xB8\x80\x23"
+			  "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
+			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
+		.rlen	= 40,
+		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
+		.input	= "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
+			  "\x2C\x64\x87\x46\x1E\x34\x10\x05"
+			  "\x29\x6B\xBB\x36\xE9\x69\xAD\x92"
+			  "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D"
+			  "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA"
+			  "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7"
+			  "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D",
+		.ilen	= 56,
 	}, {
-		.key	= "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
-			  "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
-			  "\xa4\x48\x93\x39\x26\x71\x4a\xc6"
-			  "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb"
-			  "\x85\x34\x66",
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.result	= "\x45\x00\x00\x49\x33\x3E\x00\x00"
+			  "\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
+			  "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+			  "\x00\x35\xCB\x45\x80\x03\x02\x5B"
+			  "\x00\x00\x01\xE0\x00\x1E\x8C\x18"
+			  "\xD6\x57\x59\xD5\x22\x84\xA0\x35"
+			  "\x2C\x71\x47\x5C\x88\x80\x39\x1C"
+			  "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
+			  "\x5A\xE2\x70\xC0\x38\x99\x49\x39"
+			  "\x15\x01\x01\x01",
+		.rlen	= 76,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.input	= "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
+			  "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86"
+			  "\xC8\x02\xF0\xB0\x03\x09\xD9\x02"
+			  "\xA0\xD2\x59\x04\xD1\x85\x2A\x24"
+			  "\x1C\x67\x3E\xD8\x68\x72\x06\x94"
+			  "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B"
+			  "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C"
+			  "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE"
+			  "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5"
+			  "\x23\xF4\x84\x40\x74\x14\x8A\x6B"
+			  "\xDB\xD7\x67\xED\xA4\x93\xF3\x47"
+			  "\xCC\xF7\x46\x6F",
+		.ilen	= 92,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x73\x61\x6C",
 		.klen	= 35,
-		.iv	= "\x42\xc8\x92\x0f\x36\x58\xe0\x6b",
-		.alen	= 0,
-		.input	= "\x48\x01\x5e\x02\x24\x04\x66\x47"
-			  "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd"
-			  "\xa5\xa9\x87\x8d\x84\xee\x2e\x77"
-			  "\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6"
-			  "\x72\xc3\x8e\xf7\x70\xb1\xb2\x07"
-			  "\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a",
-		.ilen	= 48,
-		.result	= "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c"
-			  "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99"
-			  "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39"
-			  "\x89\xd4\x75\x7a\x63\xb1\xda\x93",
-		.rlen	= 32,
-		.novrfy	= 1,
+		.iv	= "\x61\x6E\x64\x01\x69\x76\x65\x63",
+		.result	= "\x63\x69\x73\x63\x6F\x01\x72\x75"
+			  "\x6C\x65\x73\x01\x74\x68\x65\x01"
+			  "\x6E\x65\x74\x77\x65\x01\x64\x65"
+			  "\x66\x69\x6E\x65\x01\x74\x68\x65"
+			  "\x74\x65\x63\x68\x6E\x6F\x6C\x6F"
+			  "\x67\x69\x65\x73\x01\x74\x68\x61"
+			  "\x74\x77\x69\x6C\x6C\x01\x64\x65"
+			  "\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
+			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
+		.rlen	= 72,
+		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
+		.input	= "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
+			  "\x00\x07\x1D\xBE\x60\x5D\x73\x16"
+			  "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4"
+			  "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A"
+			  "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4"
+			  "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5"
+			  "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6"
+			  "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C"
+			  "\x73\xF4\xE7\x12\x84\x4C\x37\x0A"
+			  "\x4A\x8F\x06\x37\x48\xF9\xF9\x05"
+			  "\x55\x13\x40\xC3\xD5\x55\x3A\x3D",
+		.ilen	= 88,
 	}, {
-		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
-			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
-			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-			  "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b"
-			  "\xcf\x76\x3f",
+		.key	= "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
+			  "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
+			  "\xD9\x66\x42",
+		.klen	= 19,
+		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.result	= "\x01\x02\x02\x01",
+		.rlen	= 4,
+		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+			  "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.alen	= 16,
+		.input	= "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
+			  "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90"
+			  "\xF7\x61\x24\x62",
+		.ilen	= 20,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xDE\xCA\xF8",
+		.klen	= 19,
+		.iv	= "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+		.result	= "\x74\x6F\x01\x62\x65\x01\x6F\x72"
+			  "\x01\x6E\x6F\x74\x01\x74\x6F\x01"
+			  "\x62\x65\x00\x01",
+		.rlen	= 20,
+		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
+		.input	= "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
+			  "\x03\x9B\x84\xFC\x44\x8C\xBB\x81"
+			  "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0"
+			  "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC"
+			  "\x17\x17\x65\xAD",
+		.ilen	= 36,
+	}, {
+		.key	= "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
+			  "\x6D\x61\x72\x69\x6A\x75\x61\x6E"
+			  "\x61\x61\x6E\x64\x64\x6F\x69\x74"
+			  "\x62\x65\x66\x6F\x72\x65\x69\x61"
+			  "\x74\x75\x72",
 		.klen	= 35,
-		.iv	= "\xd9\x95\x75\x8f\x44\x89\x40\x7b",
-		.assoc	= "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88"
-			  "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b"
-			  "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
-			  "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe",
-		.alen	= 32,
-		.input	= "\x48\x58\xd6\xf3\xad\x63\x58\xbf"
-			  "\xae\xc7\x5e\xae\x83\x8f\x7b\xe4"
-			  "\x78\x5c\x4c\x67\x71\x89\x94\xbf"
-			  "\x47\xf1\x63\x7e\x1c\x59\xbd\xc5"
-			  "\x7f\x44\x0a\x0c\x01\x18\x07\x92"
-			  "\xe1\xd3\x51\xce\x32\x6d\x0c\x5b",
-		.ilen	= 48,
-		.result	= "\xc2\x54\xc8\xde\x78\x87\x77\x40"
-			  "\x49\x71\xe4\xb7\xe7\xcb\x76\x61"
-			  "\x0a\x41\xb9\xe9\xc0\x76\x54\xab"
-			  "\x04\x49\x3b\x19\x93\x57\x25\x5d",
+		.iv	= "\x33\x30\x21\x69\x67\x65\x74\x6D",
+		.result	= "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+			  "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+			  "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+			  "\x02\x00\x07\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x01\x02\x02\x01",
+		.rlen	= 52,
+		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
+			  "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+			  "\x67\x65\x74\x6D",
+		.alen	= 20,
+		.input	= "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
+			  "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A"
+			  "\x48\x59\x80\x18\x1A\x18\x1A\x04"
+			  "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75"
+			  "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF"
+			  "\x16\xC3\x35\xA8\xE7\xCE\x84\x04"
+			  "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42"
+			  "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B"
+			  "\x39\xDB\xC8\xDC",
+		.ilen	= 68,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.result	= "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+			  "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+			  "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+			  "\x02\x00\x07\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x01\x02\x02\x01",
+		.rlen	= 52,
+		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.input	= "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
+			  "\x10\x60\x54\x25\xEB\x80\x04\x93"
+			  "\xCA\x1B\x23\x97\xCB\x21\x2E\x01"
+			  "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B"
+			  "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+			  "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+			  "\x44\xCC\x90\xBF\x00\x94\x94\x92"
+			  "\x20\x17\x0C\x1B\x55\xDE\x7E\x68"
+			  "\xF4\x95\x5D\x4F",
+		.ilen	= 68,
+	}, {
+		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+			  "\x22\x43\x3C",
+		.klen	= 19,
+		.iv	= "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
+		.result	= "\x08\x00\xC6\xCD\x02\x00\x07\x00"
+			  "\x61\x62\x63\x64\x65\x66\x67\x68"
+			  "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
+			  "\x71\x72\x73\x74\x01\x02\x02\x01",
 		.rlen	= 32,
-	},
+		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
+			  "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+			  "\x3A\x23\x4B\xFD",
+		.alen	= 20,
+		.input	= "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
+			  "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F"
+			  "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF"
+			  "\x58\x7D\xCF\x29\xA3\x41\x68\x6B"
+			  "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F"
+			  "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6",
+		.ilen	= 48,
+	}
 };

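The decryption vectors added above share one shape: .input carries the ciphertext followed by a 16-byte ICV (so .ilen = .rlen + 16 throughout), .assoc holds the associated data that is authenticated but not encrypted, and .result is the expected plaintext. A minimal sketch of one such entry, assuming the struct aead_testvec layout from crypto/testmgr.h and purely illustrative byte values:

	static struct aead_testvec example_dec_tv = {
		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
		.klen	= 16,
		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00",
		.assoc	= "\xff\xff\xff\xff",	/* authenticated-only data */
		.alen	= 4,
		.input	= "\x00\x00\x00\x00"	/* 4 ciphertext bytes ... */
			  "\x00\x00\x00\x00\x00\x00\x00\x00"
			  "\x00\x00\x00\x00\x00\x00\x00\x00", /* ... + 16-byte ICV */
		.ilen	= 20,			/* = .rlen + 16 */
		.result	= "\x0a\x0b\x0c\x0d",	/* expected plaintext */
		.rlen	= 4,
	};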
 /*
@@ -22343,8 +23647,9 @@ static struct aead_testvec rfc7539esp_enc_tv_template[] = {
 		.klen	= 36,
 		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08",
 		.assoc	= "\xf3\x33\x88\x86\x00\x00\x00\x00"
-			  "\x00\x00\x4e\x91",
-		.alen	= 12,
+			  "\x00\x00\x4e\x91\x01\x02\x03\x04"
+			  "\x05\x06\x07\x08",
+		.alen	= 20,
 		.input	= "\x49\x6e\x74\x65\x72\x6e\x65\x74"
 			  "\x2d\x44\x72\x61\x66\x74\x73\x20"
 			  "\x61\x72\x65\x20\x64\x72\x61\x66"
@@ -22430,8 +23735,9 @@ static struct aead_testvec rfc7539esp_dec_tv_template[] = {
 		.klen	= 36,
 		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08",
 		.assoc	= "\xf3\x33\x88\x86\x00\x00\x00\x00"
-			  "\x00\x00\x4e\x91",
-		.alen	= 12,
+			  "\x00\x00\x4e\x91\x01\x02\x03\x04"
+			  "\x05\x06\x07\x08",
+		.alen	= 20,
 		.input	= "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
 			  "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd"
 			  "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89"
@@ -30174,7 +31480,7 @@ static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
 	},
 };

-#define CHACHA20_ENC_TEST_VECTORS 3
+#define CHACHA20_ENC_TEST_VECTORS 4
 static struct cipher_testvec chacha20_enc_tv_template[] = {
 	{ /* RFC7539 A.2. Test Vector #1 */
 		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -30348,6 +31654,338 @@ static struct cipher_testvec chacha20_enc_tv_template[] = {
 			  "\x87\xb5\x8d\xfd\x72\x8a\xfa\x36"
 			  "\x87\xb5\x8d\xfd\x72\x8a\xfa\x36"
 			  "\x75\x7a\x79\x7a\xc1\x88\xd1",
 			  "\x75\x7a\x79\x7a\xc1\x88\xd1",
 		.rlen	= 127,
 		.rlen	= 127,
+	}, { /* Self-made test vector for long data */
+		.key	= "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+			  "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+			  "\x47\x39\x17\xc1\x40\x2b\x80\x09"
+			  "\x9d\xca\x5c\xbc\x20\x70\x75\xc0",
+		.klen	= 32,
+		.iv     = "\x1c\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x49\xee\xe0\xdc\x24\x90\x40\xcd"
+			  "\xc5\x40\x8f\x47\x05\xbc\xdd\x81"
+			  "\x47\xc6\x8d\xe6\xb1\x8f\xd7\xcb"
+			  "\x09\x0e\x6e\x22\x48\x1f\xbf\xb8"
+			  "\x5c\xf7\x1e\x8a\xc1\x23\xf2\xd4"
+			  "\x19\x4b\x01\x0f\x4e\xa4\x43\xce"
+			  "\x01\xc6\x67\xda\x03\x91\x18\x90"
+			  "\xa5\xa4\x8e\x45\x03\xb3\x2d\xac"
+			  "\x74\x92\xd3\x53\x47\xc8\xdd\x25"
+			  "\x53\x6c\x02\x03\x87\x0d\x11\x0c"
+			  "\x58\xe3\x12\x18\xfd\x2a\x5b\x40"
+			  "\x0c\x30\xf0\xb8\x3f\x43\xce\xae"
+			  "\x65\x3a\x7d\x7c\xf4\x54\xaa\xcc"
+			  "\x33\x97\xc3\x77\xba\xc5\x70\xde"
+			  "\xd7\xd5\x13\xa5\x65\xc4\x5f\x0f"
+			  "\x46\x1a\x0d\x97\xb5\xf3\xbb\x3c"
+			  "\x84\x0f\x2b\xc5\xaa\xea\xf2\x6c"
+			  "\xc9\xb5\x0c\xee\x15\xf3\x7d\xbe"
+			  "\x9f\x7b\x5a\xa6\xae\x4f\x83\xb6"
+			  "\x79\x49\x41\xf4\x58\x18\xcb\x86"
+			  "\x7f\x30\x0e\xf8\x7d\x44\x36\xea"
+			  "\x75\xeb\x88\x84\x40\x3c\xad\x4f"
+			  "\x6f\x31\x6b\xaa\x5d\xe5\xa5\xc5"
+			  "\x21\x66\xe9\xa7\xe3\xb2\x15\x88"
+			  "\x78\xf6\x79\xa1\x59\x47\x12\x4e"
+			  "\x9f\x9f\x64\x1a\xa0\x22\x5b\x08"
+			  "\xbe\x7c\x36\xc2\x2b\x66\x33\x1b"
+			  "\xdd\x60\x71\xf7\x47\x8c\x61\xc3"
+			  "\xda\x8a\x78\x1e\x16\xfa\x1e\x86"
+			  "\x81\xa6\x17\x2a\xa7\xb5\xc2\xe7"
+			  "\xa4\xc7\x42\xf1\xcf\x6a\xca\xb4"
+			  "\x45\xcf\xf3\x93\xf0\xe7\xea\xf6"
+			  "\xf4\xe6\x33\x43\x84\x93\xa5\x67"
+			  "\x9b\x16\x58\x58\x80\x0f\x2b\x5c"
+			  "\x24\x74\x75\x7f\x95\x81\xb7\x30"
+			  "\x7a\x33\xa7\xf7\x94\x87\x32\x27"
+			  "\x10\x5d\x14\x4c\x43\x29\xdd\x26"
+			  "\xbd\x3e\x3c\x0e\xfe\x0e\xa5\x10"
+			  "\xea\x6b\x64\xfd\x73\xc6\xed\xec"
+			  "\xa8\xc9\xbf\xb3\xba\x0b\x4d\x07"
+			  "\x70\xfc\x16\xfd\x79\x1e\xd7\xc5"
+			  "\x49\x4e\x1c\x8b\x8d\x79\x1b\xb1"
+			  "\xec\xca\x60\x09\x4c\x6a\xd5\x09"
+			  "\x49\x46\x00\x88\x22\x8d\xce\xea"
+			  "\xb1\x17\x11\xde\x42\xd2\x23\xc1"
+			  "\x72\x11\xf5\x50\x73\x04\x40\x47"
+			  "\xf9\x5d\xe7\xa7\x26\xb1\x7e\xb0"
+			  "\x3f\x58\xc1\x52\xab\x12\x67\x9d"
+			  "\x3f\x43\x4b\x68\xd4\x9c\x68\x38"
+			  "\x07\x8a\x2d\x3e\xf3\xaf\x6a\x4b"
+			  "\xf9\xe5\x31\x69\x22\xf9\xa6\x69"
+			  "\xc6\x9c\x96\x9a\x12\x35\x95\x1d"
+			  "\x95\xd5\xdd\xbe\xbf\x93\x53\x24"
+			  "\xfd\xeb\xc2\x0a\x64\xb0\x77\x00"
+			  "\x6f\x88\xc4\x37\x18\x69\x7c\xd7"
+			  "\x41\x92\x55\x4c\x03\xa1\x9a\x4b"
+			  "\x15\xe5\xdf\x7f\x37\x33\x72\xc1"
+			  "\x8b\x10\x67\xa3\x01\x57\x94\x25"
+			  "\x7b\x38\x71\x7e\xdd\x1e\xcc\x73"
+			  "\x55\xd2\x8e\xeb\x07\xdd\xf1\xda"
+			  "\x58\xb1\x47\x90\xfe\x42\x21\x72"
+			  "\xa3\x54\x7a\xa0\x40\xec\x9f\xdd"
+			  "\xc6\x84\x6e\xca\xae\xe3\x68\xb4"
+			  "\x9d\xe4\x78\xff\x57\xf2\xf8\x1b"
+			  "\x03\xa1\x31\xd9\xde\x8d\xf5\x22"
+			  "\x9c\xdd\x20\xa4\x1e\x27\xb1\x76"
+			  "\x4f\x44\x55\xe2\x9b\xa1\x9c\xfe"
+			  "\x54\xf7\x27\x1b\xf4\xde\x02\xf5"
+			  "\x1b\x55\x48\x5c\xdc\x21\x4b\x9e"
+			  "\x4b\x6e\xed\x46\x23\xdc\x65\xb2"
+			  "\xcf\x79\x5f\x28\xe0\x9e\x8b\xe7"
+			  "\x4c\x9d\x8a\xff\xc1\xa6\x28\xb8"
+			  "\x65\x69\x8a\x45\x29\xef\x74\x85"
+			  "\xde\x79\xc7\x08\xae\x30\xb0\xf4"
+			  "\xa3\x1d\x51\x41\xab\xce\xcb\xf6"
+			  "\xb5\xd8\x6d\xe0\x85\xe1\x98\xb3"
+			  "\x43\xbb\x86\x83\x0a\xa0\xf5\xb7"
+			  "\x04\x0b\xfa\x71\x1f\xb0\xf6\xd9"
+			  "\x13\x00\x15\xf0\xc7\xeb\x0d\x5a"
+			  "\x9f\xd7\xb9\x6c\x65\x14\x22\x45"
+			  "\x6e\x45\x32\x3e\x7e\x60\x1a\x12"
+			  "\x97\x82\x14\xfb\xaa\x04\x22\xfa"
+			  "\xa0\xe5\x7e\x8c\x78\x02\x48\x5d"
+			  "\x78\x33\x5a\x7c\xad\xdb\x29\xce"
+			  "\xbb\x8b\x61\xa4\xb7\x42\xe2\xac"
+			  "\x8b\x1a\xd9\x2f\x0b\x8b\x62\x21"
+			  "\x83\x35\x7e\xad\x73\xc2\xb5\x6c"
+			  "\x10\x26\x38\x07\xe5\xc7\x36\x80"
+			  "\xe2\x23\x12\x61\xf5\x48\x4b\x2b"
+			  "\xc5\xdf\x15\xd9\x87\x01\xaa\xac"
+			  "\x1e\x7c\xad\x73\x78\x18\x63\xe0"
+			  "\x8b\x9f\x81\xd8\x12\x6a\x28\x10"
+			  "\xbe\x04\x68\x8a\x09\x7c\x1b\x1c"
+			  "\x83\x66\x80\x47\x80\xe8\xfd\x35"
+			  "\x1c\x97\x6f\xae\x49\x10\x66\xcc"
+			  "\xc6\xd8\xcc\x3a\x84\x91\x20\x77"
+			  "\x72\xe4\x24\xd2\x37\x9f\xc5\xc9"
+			  "\x25\x94\x10\x5f\x40\x00\x64\x99"
+			  "\xdc\xae\xd7\x21\x09\x78\x50\x15"
+			  "\xac\x5f\xc6\x2c\xa2\x0b\xa9\x39"
+			  "\x87\x6e\x6d\xab\xde\x08\x51\x16"
+			  "\xc7\x13\xe9\xea\xed\x06\x8e\x2c"
+			  "\xf8\x37\x8c\xf0\xa6\x96\x8d\x43"
+			  "\xb6\x98\x37\xb2\x43\xed\xde\xdf"
+			  "\x89\x1a\xe7\xeb\x9d\xa1\x7b\x0b"
+			  "\x77\xb0\xe2\x75\xc0\xf1\x98\xd9"
+			  "\x80\x55\xc9\x34\x91\xd1\x59\xe8"
+			  "\x4b\x0f\xc1\xa9\x4b\x7a\x84\x06"
+			  "\x20\xa8\x5d\xfa\xd1\xde\x70\x56"
+			  "\x2f\x9e\x91\x9c\x20\xb3\x24\xd8"
+			  "\x84\x3d\xe1\x8c\x7e\x62\x52\xe5"
+			  "\x44\x4b\x9f\xc2\x93\x03\xea\x2b"
+			  "\x59\xc5\xfa\x3f\x91\x2b\xbb\x23"
+			  "\xf5\xb2\x7b\xf5\x38\xaf\xb3\xee"
+			  "\x63\xdc\x7b\xd1\xff\xaa\x8b\xab"
+			  "\x82\x6b\x37\x04\xeb\x74\xbe\x79"
+			  "\xb9\x83\x90\xef\x20\x59\x46\xff"
+			  "\xe9\x97\x3e\x2f\xee\xb6\x64\x18"
+			  "\x38\x4c\x7a\x4a\xf9\x61\xe8\x9a"
+			  "\xa1\xb5\x01\xa6\x47\xd3\x11\xd4"
+			  "\xce\xd3\x91\x49\x88\xc7\xb8\x4d"
+			  "\xb1\xb9\x07\x6d\x16\x72\xae\x46"
+			  "\x5e\x03\xa1\x4b\xb6\x02\x30\xa8"
+			  "\x3d\xa9\x07\x2a\x7c\x19\xe7\x62"
+			  "\x87\xe3\x82\x2f\x6f\xe1\x09\xd9"
+			  "\x94\x97\xea\xdd\x58\x9e\xae\x76"
+			  "\x7e\x35\xe5\xb4\xda\x7e\xf4\xde"
+			  "\xf7\x32\x87\xcd\x93\xbf\x11\x56"
+			  "\x11\xbe\x08\x74\xe1\x69\xad\xe2"
+			  "\xd7\xf8\x86\x75\x8a\x3c\xa4\xbe"
+			  "\x70\xa7\x1b\xfc\x0b\x44\x2a\x76"
+			  "\x35\xea\x5d\x85\x81\xaf\x85\xeb"
+			  "\xa0\x1c\x61\xc2\xf7\x4f\xa5\xdc"
+			  "\x02\x7f\xf6\x95\x40\x6e\x8a\x9a"
+			  "\xf3\x5d\x25\x6e\x14\x3a\x22\xc9"
+			  "\x37\x1c\xeb\x46\x54\x3f\xa5\x91"
+			  "\xc2\xb5\x8c\xfe\x53\x08\x97\x32"
+			  "\x1b\xb2\x30\x27\xfe\x25\x5d\xdc"
+			  "\x08\x87\xd0\xe5\x94\x1a\xd4\xf1"
+			  "\xfe\xd6\xb4\xa3\xe6\x74\x81\x3c"
+			  "\x1b\xb7\x31\xa7\x22\xfd\xd4\xdd"
+			  "\x20\x4e\x7c\x51\xb0\x60\x73\xb8"
+			  "\x9c\xac\x91\x90\x7e\x01\xb0\xe1"
+			  "\x8a\x2f\x75\x1c\x53\x2a\x98\x2a"
+			  "\x06\x52\x95\x52\xb2\xe9\x25\x2e"
+			  "\x4c\xe2\x5a\x00\xb2\x13\x81\x03"
+			  "\x77\x66\x0d\xa5\x99\xda\x4e\x8c"
+			  "\xac\xf3\x13\x53\x27\x45\xaf\x64"
+			  "\x46\xdc\xea\x23\xda\x97\xd1\xab"
+			  "\x7d\x6c\x30\x96\x1f\xbc\x06\x34"
+			  "\x18\x0b\x5e\x21\x35\x11\x8d\x4c"
+			  "\xe0\x2d\xe9\x50\x16\x74\x81\xa8"
+			  "\xb4\x34\xb9\x72\x42\xa6\xcc\xbc"
+			  "\xca\x34\x83\x27\x10\x5b\x68\x45"
+			  "\x8f\x52\x22\x0c\x55\x3d\x29\x7c"
+			  "\xe3\xc0\x66\x05\x42\x91\x5f\x58"
+			  "\xfe\x4a\x62\xd9\x8c\xa9\x04\x19"
+			  "\x04\xa9\x08\x4b\x57\xfc\x67\x53"
+			  "\x08\x7c\xbc\x66\x8a\xb0\xb6\x9f"
+			  "\x92\xd6\x41\x7c\x5b\x2a\x00\x79"
+			  "\x72",
+		.ilen	= 1281,
+		.result	= "\x45\xe8\xe0\xb6\x9c\xca\xfd\x87"
+			  "\xe8\x1d\x37\x96\x8a\xe3\x40\x35"
+			  "\xcf\x5e\x3a\x46\x3d\xfb\xd0\x69"
+			  "\xde\xaf\x7a\xd5\x0d\xe9\x52\xec"
+			  "\xc2\x82\xe5\x3e\x7d\xb2\x4a\xd9"
+			  "\xbb\xc3\x9f\xc0\x5d\xac\x93\x8d"
+			  "\x0e\x6f\xd3\xd7\xfb\x6a\x0d\xce"
+			  "\x92\x2c\xf7\xbb\x93\x57\xcc\xee"
+			  "\x42\x72\x6f\xc8\x4b\xd2\x76\xbf"
+			  "\xa0\xe3\x7a\x39\xf9\x5c\x8e\xfd"
+			  "\xa1\x1d\x41\xe5\x08\xc1\x1c\x11"
+			  "\x92\xfd\x39\x5c\x51\xd0\x2f\x66"
+			  "\x33\x4a\x71\x15\xfe\xee\x12\x54"
+			  "\x8c\x8f\x34\xd8\x50\x3c\x18\xa6"
+			  "\xc5\xe1\x46\x8a\xfb\x5f\x7e\x25"
+			  "\x9b\xe2\xc3\x66\x41\x2b\xb3\xa5"
+			  "\x57\x0e\x94\x17\x26\x39\xbb\x54"
+			  "\xae\x2e\x6f\x42\xfb\x4d\x89\x6f"
+			  "\x9d\xf1\x16\x2e\xe3\xe7\xfc\xe3"
+			  "\xb2\x4b\x2b\xa6\x7c\x04\x69\x3a"
+			  "\x70\x5a\xa7\xf1\x31\x64\x19\xca"
+			  "\x45\x79\xd8\x58\x23\x61\xaf\xc2"
+			  "\x52\x05\xc3\x0b\xc1\x64\x7c\x81"
+			  "\xd9\x11\xcf\xff\x02\x3d\x51\x84"
+			  "\x01\xac\xc6\x2e\x34\x2b\x09\x3a"
+			  "\xa8\x5d\x98\x0e\x89\xd9\xef\x8f"
+			  "\xd9\xd7\x7d\xdd\x63\x47\x46\x7d"
+			  "\xa1\xda\x0b\x53\x7d\x79\xcd\xc9"
+			  "\x86\xdd\x6b\x13\xa1\x9a\x70\xdd"
+			  "\x5c\xa1\x69\x3c\xe4\x5d\xe3\x8c"
+			  "\xe5\xf4\x87\x9c\x10\xcf\x0f\x0b"
+			  "\xc8\x43\xdc\xf8\x1d\x62\x5e\x5b"
+			  "\xe2\x03\x06\xc5\x71\xb6\x48\xa5"
+			  "\xf0\x0f\x2d\xd5\xa2\x73\x55\x8f"
+			  "\x01\xa7\x59\x80\x5f\x11\x6c\x40"
+			  "\xff\xb1\xf2\xc6\x7e\x01\xbb\x1c"
+			  "\x69\x9c\xc9\x3f\x71\x5f\x07\x7e"
+			  "\xdf\x6f\x99\xca\x9c\xfd\xf9\xb9"
+			  "\x49\xe7\xcc\x91\xd5\x9b\x8f\x03"
+			  "\xae\xe7\x61\x32\xef\x41\x6c\x75"
+			  "\x84\x9b\x8c\xce\x1d\x6b\x93\x21"
+			  "\x41\xec\xc6\xad\x8e\x0c\x48\xa8"
+			  "\xe2\xf5\x57\xde\xf7\x38\xfd\x4a"
+			  "\x6f\xa7\x4a\xf9\xac\x7d\xb1\x85"
+			  "\x7d\x6c\x95\x0a\x5a\xcf\x68\xd2"
+			  "\xe0\x7a\x26\xd9\xc1\x6d\x3e\xc6"
+			  "\x37\xbd\xbe\x24\x36\x77\x9f\x1b"
+			  "\xc1\x22\xf3\x79\xae\x95\x78\x66"
+			  "\x97\x11\xc0\x1a\xf1\xe8\x0d\x38"
+			  "\x09\xc2\xee\xb7\xd3\x46\x7b\x59"
+			  "\x77\x23\xe8\xb4\x92\x3d\x78\xbe"
+			  "\xe2\x25\x63\xa5\x2a\x06\x70\x92"
+			  "\x32\x63\xf9\x19\x21\x68\xe1\x0b"
+			  "\x9a\xd0\xee\x21\xdb\x1f\xe0\xde"
+			  "\x3e\x64\x02\x4d\x0e\xe0\x0a\xa9"
+			  "\xed\x19\x8c\xa8\xbf\xe3\x2e\x75"
+			  "\x24\x2b\xb0\xe5\x82\x6a\x1e\x6f"
+			  "\x71\x2a\x3a\x60\xed\x06\x0d\x17"
+			  "\xa2\xdb\x29\x1d\xae\xb2\xc4\xfb"
+			  "\x94\x04\xd8\x58\xfc\xc4\x04\x4e"
+			  "\xee\xc7\xc1\x0f\xe9\x9b\x63\x2d"
+			  "\x02\x3e\x02\x67\xe5\xd8\xbb\x79"
+			  "\xdf\xd2\xeb\x50\xe9\x0a\x02\x46"
+			  "\xdf\x68\xcf\xe7\x2b\x0a\x56\xd6"
+			  "\xf7\xbc\x44\xad\xb8\xb5\x5f\xeb"
+			  "\xbc\x74\x6b\xe8\x7e\xb0\x60\xc6"
+			  "\x0d\x96\x09\xbb\x19\xba\xe0\x3c"
+			  "\xc4\x6c\xbf\x0f\x58\xc0\x55\x62"
+			  "\x23\xa0\xff\xb5\x1c\xfd\x18\xe1"
+			  "\xcf\x6d\xd3\x52\xb4\xce\xa6\xfa"
+			  "\xaa\xfb\x1b\x0b\x42\x6d\x79\x42"
+			  "\x48\x70\x5b\x0e\xdd\x3a\xc9\x69"
+			  "\x8b\x73\x67\xf6\x95\xdb\x8c\xfb"
+			  "\xfd\xb5\x08\x47\x42\x84\x9a\xfa"
+			  "\xcc\x67\xb2\x3c\xb6\xfd\xd8\x32"
+			  "\xd6\x04\xb6\x4a\xea\x53\x4b\xf5"
+			  "\x94\x16\xad\xf0\x10\x2e\x2d\xb4"
+			  "\x8b\xab\xe5\x89\xc7\x39\x12\xf3"
+			  "\x8d\xb5\x96\x0b\x87\x5d\xa7\x7c"
+			  "\xb0\xc2\xf6\x2e\x57\x97\x2c\xdc"
+			  "\x54\x1c\x34\x72\xde\x0c\x68\x39"
+			  "\x9d\x32\xa5\x75\x92\x13\x32\xea"
+			  "\x90\x27\xbd\x5b\x1d\xb9\x21\x02"
+			  "\x1c\xcc\xba\x97\x5e\x49\x58\xe8"
+			  "\xac\x8b\xf3\xce\x3c\xf0\x00\xe9"
+			  "\x6c\xae\xe9\x77\xdf\xf4\x02\xcd"
+			  "\x55\x25\x89\x9e\x90\xf3\x6b\x8f"
+			  "\xb7\xd6\x47\x98\x26\x2f\x31\x2f"
+			  "\x8d\xbf\x54\xcd\x99\xeb\x80\xd7"
+			  "\xac\xc3\x08\xc2\xa6\x32\xf1\x24"
+			  "\x76\x7c\x4f\x78\x53\x55\xfb\x00"
+			  "\x8a\xd6\x52\x53\x25\x45\xfb\x0a"
+			  "\x6b\xb9\xbe\x3c\x5e\x11\xcc\x6a"
+			  "\xdd\xfc\xa7\xc4\x79\x4d\xbd\xfb"
+			  "\xce\x3a\xf1\x7a\xda\xeb\xfe\x64"
+			  "\x28\x3d\x0f\xee\x80\xba\x0c\xf8"
+			  "\xe9\x5b\x3a\xd4\xae\xc9\xf3\x0e"
+			  "\xe8\x5d\xc5\x5c\x0b\x20\x20\xee"
+			  "\x40\x0d\xde\x07\xa7\x14\xb4\x90"
+			  "\xb6\xbd\x3b\xae\x7d\x2b\xa7\xc7"
+			  "\xdc\x0b\x4c\x5d\x65\xb0\xd2\xc5"
+			  "\x79\x61\x23\xe0\xa2\x99\x73\x55"
+			  "\xad\xc6\xfb\xc7\x54\xb5\x98\x1f"
+			  "\x8c\x86\xc2\x3f\xbe\x5e\xea\x64"
+			  "\xa3\x60\x18\x9f\x80\xaf\x52\x74"
+			  "\x1a\xfe\x22\xc2\x92\x67\x40\x02"
+			  "\x08\xee\x67\x5b\x67\xe0\x3d\xde"
+			  "\x7a\xaf\x8e\x28\xf3\x5e\x0e\xf4"
+			  "\x48\x56\xaa\x85\x22\xd8\x36\xed"
+			  "\x3b\x3d\x68\x69\x30\xbc\x71\x23"
+			  "\xb1\x6e\x61\x03\x89\x44\x03\xf4"
+			  "\x32\xaa\x4c\x40\x9f\x69\xfb\x70"
+			  "\x91\xcc\x1f\x11\xbd\x76\x67\xe6"
+			  "\x10\x8b\x29\x39\x68\xea\x4e\x6d"
+			  "\xae\xfb\x40\xcf\xe2\xd0\x0d\x8d"
+			  "\x6f\xed\x9b\x8d\x64\x7a\x94\x8e"
+			  "\x32\x38\x78\xeb\x7d\x5f\xf9\x4d"
+			  "\x13\xbe\x21\xea\x16\xe7\x5c\xee"
+			  "\xcd\xf6\x5f\xc6\x45\xb2\x8f\x2b"
+			  "\xb5\x93\x3e\x45\xdb\xfd\xa2\x6a"
+			  "\xec\x83\x92\x99\x87\x47\xe0\x7c"
+			  "\xa2\x7b\xc4\x2a\xcd\xc0\x81\x03"
+			  "\x98\xb0\x87\xb6\x86\x13\x64\x33"
+			  "\x4c\xd7\x99\xbf\xdb\x7b\x6e\xaa"
+			  "\x76\xcc\xa0\x74\x1b\xa3\x6e\x83"
+			  "\xd4\xba\x7a\x84\x9d\x91\x71\xcd"
+			  "\x60\x2d\x56\xfd\x26\x35\xcb\xeb"
+			  "\xac\xe9\xee\xa4\xfc\x18\x5b\x91"
+			  "\xd5\xfe\x84\x45\xe0\xc7\xfd\x11"
+			  "\xe9\x00\xb6\x54\xdf\xe1\x94\xde"
+			  "\x2b\x70\x9f\x94\x7f\x15\x0e\x83"
+			  "\x63\x10\xb3\xf5\xea\xd3\xe8\xd1"
+			  "\xa5\xfc\x17\x19\x68\x9a\xbc\x17"
+			  "\x30\x43\x0a\x1a\x33\x92\xd4\x2a"
+			  "\x2e\x68\x99\xbc\x49\xf0\x68\xe3"
+			  "\xf0\x1f\xcb\xcc\xfa\xbb\x05\x56"
+			  "\x46\x84\x8b\x69\x83\x64\xc5\xe0"
+			  "\xc5\x52\x99\x07\x3c\xa6\x5c\xaf"
+			  "\xa3\xde\xd7\xdb\x43\xe6\xb7\x76"
+			  "\x4e\x4d\xd6\x71\x60\x63\x4a\x0c"
+			  "\x5f\xae\x25\x84\x22\x90\x5f\x26"
+			  "\x61\x4d\x8f\xaf\xc9\x22\xf2\x05"
+			  "\xcf\xc1\xdc\x68\xe5\x57\x8e\x24"
+			  "\x1b\x30\x59\xca\xd7\x0d\xc3\xd3"
+			  "\x52\x9e\x09\x3e\x0e\xaf\xdb\x5f"
+			  "\xc7\x2b\xde\x3a\xfd\xad\x93\x04"
+			  "\x74\x06\x89\x0e\x90\xeb\x85\xff"
+			  "\xe6\x3c\x12\x42\xf4\xfa\x80\x75"
+			  "\x5e\x4e\xd7\x2f\x93\x0b\x34\x41"
+			  "\x02\x85\x68\xd0\x03\x12\xde\x92"
+			  "\x54\x7a\x7e\xfb\x55\xe7\x88\xfb"
+			  "\xa4\xa9\xf2\xd1\xc6\x70\x06\x37"
+			  "\x25\xee\xa7\x6e\xd9\x89\x86\x50"
+			  "\x2e\x07\xdb\xfb\x2a\x86\x45\x0e"
+			  "\x91\xf4\x7c\xbb\x12\x60\xe8\x3f"
+			  "\x71\xbe\x8f\x9d\x26\xef\xd9\x89"
+			  "\xc4\x8f\xd8\xc5\x73\xd8\x84\xaa"
+			  "\x2f\xad\x22\x1e\x7e\xcf\xa2\x08"
+			  "\x23\x45\x89\x42\xa0\x30\xeb\xbf"
+			  "\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
+			  "\x98",
+		.rlen	= 1281,
 	},
 };

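The closing "long data" vector (1281 bytes of input and result, 32-byte key, 16-byte IV) exercises the multi-block keystream path. For orientation, a minimal, kernel-independent sketch of the RFC 7539 ChaCha20 block function these vectors test; the state is 16 little-endian words (4 constants, 8 key words, and the 4 counter/nonce words taken from the IV):

	#include <stdint.h>
	#include <string.h>

	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

	#define QR(a, b, c, d) do {				\
		a += b; d ^= a; d = ROTL32(d, 16);		\
		c += d; b ^= c; b = ROTL32(b, 12);		\
		a += b; d ^= a; d = ROTL32(d, 8);		\
		c += d; b ^= c; b = ROTL32(b, 7);		\
	} while (0)

	/* One 64-byte keystream block from a 16-word state (20 rounds). */
	static void chacha20_block(const uint32_t in[16], uint8_t out[64])
	{
		uint32_t x[16];
		int i;

		memcpy(x, in, sizeof(x));
		for (i = 0; i < 10; i++) {
			/* column rounds */
			QR(x[0], x[4], x[8],  x[12]);
			QR(x[1], x[5], x[9],  x[13]);
			QR(x[2], x[6], x[10], x[14]);
			QR(x[3], x[7], x[11], x[15]);
			/* diagonal rounds */
			QR(x[0], x[5], x[10], x[15]);
			QR(x[1], x[6], x[11], x[12]);
			QR(x[2], x[7], x[8],  x[13]);
			QR(x[3], x[4], x[9],  x[14]);
		}
		for (i = 0; i < 16; i++) {
			uint32_t v = x[i] + in[i];

			out[4 * i + 0] = v;
			out[4 * i + 1] = v >> 8;
			out[4 * i + 2] = v >> 16;
			out[4 * i + 3] = v >> 24;
		}
	}

Each 64-byte run of .result above is the matching .input block XORed with one such keystream block, with the counter word (state[12]) incremented per block.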

+ 3 - 0
drivers/clk/imx/clk-imx6q.c

@@ -381,6 +381,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 	clk[IMX6QDL_CLK_ASRC]         = imx_clk_gate2_shared("asrc",         "asrc_podf",   base + 0x68, 6, &share_count_asrc);
 	clk[IMX6QDL_CLK_ASRC_IPG]     = imx_clk_gate2_shared("asrc_ipg",     "ahb",         base + 0x68, 6, &share_count_asrc);
 	clk[IMX6QDL_CLK_ASRC_MEM]     = imx_clk_gate2_shared("asrc_mem",     "ahb",         base + 0x68, 6, &share_count_asrc);
+	clk[IMX6QDL_CLK_CAAM_MEM]     = imx_clk_gate2("caam_mem",      "ahb",               base + 0x68, 8);
+	clk[IMX6QDL_CLK_CAAM_ACLK]    = imx_clk_gate2("caam_aclk",     "ahb",               base + 0x68, 10);
+	clk[IMX6QDL_CLK_CAAM_IPG]     = imx_clk_gate2("caam_ipg",      "ipg",               base + 0x68, 12);
 	clk[IMX6QDL_CLK_CAN1_IPG]     = imx_clk_gate2("can1_ipg",      "ipg",               base + 0x68, 14);
 	clk[IMX6QDL_CLK_CAN1_SERIAL]  = imx_clk_gate2("can1_serial",   "can_root",          base + 0x68, 16);
 	clk[IMX6QDL_CLK_CAN2_IPG]     = imx_clk_gate2("can2_ipg",      "ipg",               base + 0x68, 18);
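The three new gates sit in the same CCGR register as the surrounding ASRC/CAN gates (base + 0x68), with enable bit pairs at 8, 10 and 12. A hedged sketch of a consumer ungating them through the common clock framework; the connection names "mem", "aclk" and "ipg" are illustrative here, not taken from a real binding:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Illustrative consumer: grab and ungate the three CAAM clocks
	 * registered above (error unwinding trimmed for brevity). */
	static int caam_clocks_on_example(struct device *dev)
	{
		struct clk *mem = devm_clk_get(dev, "mem");
		struct clk *aclk = devm_clk_get(dev, "aclk");
		struct clk *ipg = devm_clk_get(dev, "ipg");

		if (IS_ERR(mem) || IS_ERR(aclk) || IS_ERR(ipg))
			return -ENODEV;

		clk_prepare_enable(mem);
		clk_prepare_enable(aclk);
		clk_prepare_enable(ipg);
		return 0;
	}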

+ 17 - 0
drivers/crypto/Kconfig

@@ -480,4 +480,21 @@ config CRYPTO_DEV_IMGTEC_HASH
 	  hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
 	  hashing algorithms.

+config CRYPTO_DEV_SUN4I_SS
+	tristate "Support for Allwinner Security System cryptographic accelerator"
+	depends on ARCH_SUNXI
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER
+	help
+	  Some Allwinner SoCs have a crypto accelerator named
+	  Security System. Select this if you want to use it.
+	  The Security System handles AES/DES/3DES ciphers in CBC mode
+	  and the SHA1 and MD5 hash algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sun4i-ss.
+
 endif # CRYPTO_HW
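With the selections above in place, the algorithms the driver advertises are reached through the regular crypto API by name. A minimal sketch of the synchronous (software) call pattern for one of the selected algorithms; the accelerator itself registers asynchronous implementations that win the by-name lookup through higher priority, but the lookup idea is the same:

	#include <crypto/hash.h>
	#include <linux/err.h>

	/* One-shot MD5 digest through the shash API. */
	static int md5_digest_example(const u8 *data, unsigned int len,
				      u8 out[16])
	{
		struct crypto_shash *tfm = crypto_alloc_shash("md5", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			err = crypto_shash_digest(desc, data, len, out);
		}

		crypto_free_shash(tfm);
		return err;
	}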

+ 1 - 0
drivers/crypto/Makefile

@@ -28,3 +28,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/

+ 1 - 1
drivers/crypto/amcc/crypto4xx_core.c

@@ -1113,7 +1113,7 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
 	struct device *dev = (struct device *)data;
 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

-	if (core_dev->dev->ce_base == 0)
+	if (!core_dev->dev->ce_base)
 		return 0;

 	writel(PPC4XX_INTERRUPT_CLR,

+ 9 - 1
drivers/crypto/caam/Kconfig

@@ -1,6 +1,6 @@
 config CRYPTO_DEV_FSL_CAAM
 	tristate "Freescale CAAM-Multicore driver backend"
-	depends on FSL_SOC
+	depends on FSL_SOC || ARCH_MXC
 	help
 	  Enables the driver module for Freescale's Cryptographic Accelerator
 	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -112,6 +112,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
 	  To compile this as a module, choose M here: the module
 	  will be called caamrng.

+config CRYPTO_DEV_FSL_CAAM_IMX
+	def_bool SOC_IMX6 || SOC_IMX7D
+	depends on CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_LE
+	def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
+	depends on CRYPTO_DEV_FSL_CAAM
+
 config CRYPTO_DEV_FSL_CAAM_DEBUG
 	bool "Enable debug output in CAAM driver"
 	depends on CRYPTO_DEV_FSL_CAAM
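The two def_bool symbols encode platform facts rather than user choices: CRYPTO_DEV_FSL_CAAM_IMX marks the i.MX integrations, and CRYPTO_DEV_FSL_CAAM_LE marks parts (i.MX6/7D and LS1021A) whose CAAM registers are little-endian, unlike the big-endian PowerPC SoCs. A hypothetical sketch of how such a symbol is typically consumed; the real driver's accessor names and logic may differ:

	#include <linux/io.h>

	/* Hypothetical register accessor keyed off the new symbol. */
	#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_LE
	#define caam_read32(addr)	ioread32(addr)
	#else
	#define caam_read32(addr)	ioread32be(addr)
	#endif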

+ 1516 - 1361
drivers/crypto/caam/caamalg.c

@@ -68,27 +68,29 @@
 #define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
 #define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
 					 CAAM_CMD_SZ * 4)
+#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
+					 CAAM_CMD_SZ * 5)

 /* length of descriptors text */
 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

 /* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN	(6 * CAAM_CMD_SZ)
+#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

 #define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

 #define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
 #define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
 #define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

 #define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

 #define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
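These lengths feed the repeated "keys fit inline" tests later in this patch: the split MDHA key and the encryption key may be embedded in a shared descriptor as immediate data only when descriptor text, job I/O commands and keys all fit the 64-word buffer. The predicate, lifted from the checks below into one helper for readability (same symbols as in this file):

	/* Same arithmetic as the keys_fit_inline checks in caamalg.c. */
	static bool keys_fit_inline_example(unsigned int desc_text_len,
					    unsigned int split_key_pad_len,
					    unsigned int enckeylen,
					    bool is_rfc3686)
	{
		return desc_text_len + AUTHENC_DESC_JOB_IO_LEN +
		       split_key_pad_len + enckeylen +
		       (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
		       CAAM_DESC_BYTES_MAX;
	}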
@@ -111,6 +113,20 @@
 #endif
 static struct list_head alg_list;

+struct caam_alg_entry {
+	int class1_alg_type;
+	int class2_alg_type;
+	int alg_op;
+	bool rfc3686;
+	bool geniv;
+};
+
+struct caam_aead_alg {
+	struct aead_alg aead;
+	struct caam_alg_entry caam;
+	bool registered;
+};
+
 /* Set DK bit in class 1 operation if shared */
 static inline void append_dec_op1(u32 *desc, u32 type)
 {
@@ -144,18 +160,6 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
 			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
 }

-/*
- * For aead encrypt and decrypt, read iv for both classes
- */
-static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
-{
-	append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
-			LDST_SRCDST_BYTE_CONTEXT |
-			(ivoffset << LDST_OFFSET_SHIFT));
-	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
-		    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
-}
-
 /*
  * For ablkcipher encrypt and decrypt, read from req->src and
  * write to req->dst
@@ -169,13 +173,6 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 }

-/*
- * If all data, including src (with assoc and iv) or dst (with iv only) are
- * contiguous
- */
-#define GIV_SRC_CONTIG		1
-#define GIV_DST_CONTIG		(1 << 1)
-
 /*
  * per-session context
  */
@@ -259,7 +256,6 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,

 static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
-	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
 	bool keys_fit_inline = false;
@@ -270,11 +266,11 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
-	/* old_aead_encrypt shared descriptor */
+	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;

 	init_sh_desc(desc, HDR_SHARE_SERIAL);
@@ -291,20 +287,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
 	set_jump_tgt_here(desc, key_jump_cmd);

-	/* cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
-	/*
-	 * NULL encryption; IV is zero
-	 * assoclen = (assoclen + cryptlen) - cryptlen
-	 */
-	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
+	/* assoclen + cryptlen = seqinlen */
+	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-	/* Prepare to read and write cryptlen bytes */
+	/* Prepare to read and write cryptlen + assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

@@ -363,7 +349,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)

 	desc = ctx->sh_desc_dec;

-	/* old_aead_decrypt shared descriptor */
+	/* aead_decrypt shared descriptor */
 	init_sh_desc(desc, HDR_SHARE_SERIAL);

 	/* Skip if already shared */
@@ -382,18 +368,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

-	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-				ctx->authsize + ivsize);
-	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	/* assoclen + cryptlen = seqoutlen */
 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-
-	/* Prepare to read and write cryptlen bytes */
+	/* Prepare to read and write cryptlen + assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

@@ -450,10 +428,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)

 static int aead_set_sh_desc(struct crypto_aead *aead)
 {
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg, aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
-	const char *alg_name = crypto_tfm_alg_name(ctfm);
 	struct device *jrdev = ctx->jrdev;
 	bool keys_fit_inline;
 	u32 geniv, moveiv;
@@ -461,11 +439,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	u32 *desc;
 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
-	const bool is_rfc3686 = (ctr_mode &&
-				 (strstr(alg_name, "rfc3686") != NULL));
-
-	if (!ctx->authsize)
-		return 0;
+	const bool is_rfc3686 = alg->caam.rfc3686;

 	/* NULL encryption / decryption */
 	if (!ctx->enckeylen)
@@ -486,18 +460,21 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	if (is_rfc3686)
 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

+	if (alg->caam.geniv)
+		goto skip_enc;
+
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
 	keys_fit_inline = false;
-	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;

-	/* old_aead_encrypt shared descriptor */
+	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;

 	/* Note: Context registers are saved. */
@@ -507,19 +484,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

-	/* cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
-	/* assoclen + cryptlen = seqinlen - ivsize */
-	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-	/* assoclen = (assoclen + cryptlen) - cryptlen */
-	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

 	/* read assoc before reading payload */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-	aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
+				      FIFOLDST_VLF);

 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
@@ -534,8 +508,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

 	/* Read and write cryptlen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

 	/* Write ICV */
@@ -555,18 +529,19 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		       desc_bytes(desc), 1);
 #endif

+skip_enc:
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
 	keys_fit_inline = false;
-	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;

-	/* old_aead_decrypt shared descriptor */
+	/* aead_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;

 	/* Note: Context registers are saved. */
@@ -576,19 +551,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

-	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-				ctx->authsize + ivsize);
-	/* assoclen = (assoclen + cryptlen) - cryptlen */
-	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

 	/* read assoc before reading payload */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);

-	aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
-
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -605,8 +578,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		append_dec_op1(desc, ctx->class1_alg_type);

 	/* Read and write cryptlen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

 	/* Load ICV */
@@ -626,12 +599,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		       desc_bytes(desc), 1);
 #endif

+	if (!alg->caam.geniv)
+		goto skip_givenc;
+
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
 	keys_fit_inline = false;
-	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
@@ -643,6 +619,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	/* Note: Context registers are saved. */
 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

+	if (is_rfc3686)
+		goto copy_iv;
+
 	/* Generate IV */
 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
@@ -656,6 +635,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		    (ivsize << MOVE_LEN_SHIFT));
 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

+copy_iv:
 	/* Copy IV to class 1 context */
 	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
@@ -668,8 +648,12 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	/* ivsize + cryptlen = seqoutlen - authsize */
 	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

-	/* assoclen = seqinlen - (ivsize + cryptlen) */
-	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

 	/* read assoc before reading payload */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
@@ -710,9 +694,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);

-	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
-						 desc_bytes(desc),
-						 DMA_TO_DEVICE);
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
@@ -723,6 +707,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		       desc_bytes(desc), 1);
 #endif

+skip_givenc:
 	return 0;
 }

@@ -976,22 +961,28 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 	append_operation(desc, ctx->class1_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
 	/* Read assoc data */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

-	/* cryptlen = seqoutlen - assoclen */
-	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	/* Skip IV */
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

 	/* Will read cryptlen bytes */
 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* cryptlen = seqoutlen - assoclen */
+	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
 	/* Write encrypted data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

@@ -1044,21 +1035,27 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 	append_operation(desc, ctx->class1_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
 	/* Read assoc data */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

-	/* Will write cryptlen bytes */
-	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	/* Skip IV */
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

 	/* Will read cryptlen bytes */
-	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* Will write cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

 	/* Store payload data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
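A note on the A-005473 workaround that appears in both rfc4106 hunks above (our reading of the in-line comment; the erratum text itself is not part of this patch):

	/*
	 * Reading of the descriptor sequence above: the zero-length
	 * SEQ FIFO LOAD is interposed so the two skips are not
	 * processed back-to-back, which the "simultaneous SEQ FIFO
	 * skips" erratum forbids:
	 *
	 *   SEQ FIFO LOAD  skip, 8 bytes   (input side: drop the IV)
	 *   SEQ FIFO LOAD  len 0, MSG      (dummy - the workaround)
	 *   SEQ FIFO STORE skip, VLF       (output side: drop assoc data)
	 */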
@@ -1793,22 +1790,6 @@ static void aead_unmap(struct device *dev,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }

-static void old_aead_unmap(struct device *dev,
-			   struct aead_edesc *edesc,
-			   struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	int ivsize = crypto_aead_ivsize(aead);
-
-	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
-			     DMA_TO_DEVICE, edesc->assoc_chained);
-
-	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-		   edesc->dst_chained, edesc->iv_dma, ivsize,
-		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
-}
-
 static void ablkcipher_unmap(struct device *dev,
 			     struct ablkcipher_edesc *edesc,
 			     struct ablkcipher_request *req)
@@ -1844,45 +1825,6 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	aead_request_complete(req, err);
 }

-static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				  void *context)
-{
-	struct aead_request *req = context;
-	struct aead_edesc *edesc;
-#ifdef DEBUG
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	edesc = (struct aead_edesc *)((char *)desc -
-		 offsetof(struct aead_edesc, hw_desc));
-
-	if (err)
-		caam_jr_strstatus(jrdev, err);
-
-	old_aead_unmap(jrdev, edesc, req);
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
-		       edesc->src_nents ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->src_nents ? 100 : req->cryptlen +
-		       ctx->authsize + 4, 1);
-#endif
-
-	kfree(edesc);
-
-	aead_request_complete(req, err);
-}
-
 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 				   void *context)
 {
@@ -1911,62 +1853,6 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	aead_request_complete(req, err);
 }

-static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				  void *context)
-{
-	struct aead_request *req = context;
-	struct aead_edesc *edesc;
-#ifdef DEBUG
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	edesc = (struct aead_edesc *)((char *)desc -
-		 offsetof(struct aead_edesc, hw_desc));
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen - ctx->authsize, 1);
-#endif
-
-	if (err)
-		caam_jr_strstatus(jrdev, err);
-
-	old_aead_unmap(jrdev, edesc, req);
-
-	/*
-	 * verify hw auth check passed else return -EBADMSG
-	 */
-	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
-		err = -EBADMSG;
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
-		       sizeof(struct iphdr) + req->assoclen +
-		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
-		       ctx->authsize + 36, 1);
-	if (!err && edesc->sec4_sg_bytes) {
-		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
-		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
-			sg->length + ctx->authsize + 16, 1);
-	}
-#endif
-
-	kfree(edesc);
-
-	aead_request_complete(req, err);
-}
-
 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 				   void *context)
 {
@@ -2032,91 +1918,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	ablkcipher_request_complete(req, err);
 }

-/*
- * Fill in aead job descriptor
- */
-static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
-			      struct aead_edesc *edesc,
-			      struct aead_request *req,
-			      bool all_contig, bool encrypt)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-	int authsize = ctx->authsize;
-	u32 *desc = edesc->hw_desc;
-	u32 out_options = 0, in_options;
-	dma_addr_t dst_dma, src_dma;
-	int len, sec4_sg_index = 0;
-	bool is_gcm = false;
-
-#ifdef DEBUG
-	debug("assoclen %d cryptlen %d authsize %d\n",
-	      req->assoclen, req->cryptlen, authsize);
-	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-		       edesc->src_nents ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-			edesc->src_nents ? 100 : req->cryptlen, 1);
-	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
-		       desc_bytes(sh_desc), 1);
-#endif
-
-	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-	      OP_ALG_ALGSEL_AES) &&
-	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-		is_gcm = true;
-
-	len = desc_len(sh_desc);
-	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
-	if (all_contig) {
-		if (is_gcm)
-			src_dma = edesc->iv_dma;
-		else
-			src_dma = sg_dma_address(req->assoc);
-		in_options = 0;
-	} else {
-		src_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
-				 (edesc->src_nents ? : 1);
-		in_options = LDST_SGF;
-	}
-
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
-			  in_options);
-
-	if (likely(req->src == req->dst)) {
-		if (all_contig) {
-			dst_dma = sg_dma_address(req->src);
-		} else {
-			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
-				  ((edesc->assoc_nents ? : 1) + 1);
-			out_options = LDST_SGF;
-		}
-	} else {
-		if (!edesc->dst_nents) {
-			dst_dma = sg_dma_address(req->dst);
-		} else {
-			dst_dma = edesc->sec4_sg_dma +
-				  sec4_sg_index *
-				  sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
-	}
-	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
-				   out_options);
-	else
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
-				   out_options);
-}
-
 /*
  * Fill in aead job descriptor
  */
@@ -2208,80 +2009,43 @@ static void init_gcm_job(struct aead_request *req,
 	/* End of blank commands */
 }

-/*
- * Fill in aead givencrypt job descriptor
- */
-static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
-			      struct aead_edesc *edesc,
-			      struct aead_request *req,
-			      int contig)
+static void init_authenc_job(struct aead_request *req,
+			     struct aead_edesc *edesc,
+			     bool all_contig, bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg, aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-	int authsize = ctx->authsize;
+	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
 	u32 *desc = edesc->hw_desc;
-	u32 out_options = 0, in_options;
-	dma_addr_t dst_dma, src_dma;
-	int len, sec4_sg_index = 0;
-	bool is_gcm = false;
+	u32 ivoffset = 0;
-#ifdef DEBUG
-	debug("assoclen %d cryptlen %d authsize %d\n",
-	      req->assoclen, req->cryptlen, authsize);
-	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-			edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
-	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
-		       desc_bytes(sh_desc), 1);
-#endif
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ivoffset = 16;
-	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-	      OP_ALG_ALGSEL_AES) &&
-	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-		is_gcm = true;
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686)
+		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
-	len = desc_len(sh_desc);
-	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+	init_aead_job(req, edesc, all_contig, encrypt);
-	if (contig & GIV_SRC_CONTIG) {
-		if (is_gcm)
-			src_dma = edesc->iv_dma;
-		else
-			src_dma = sg_dma_address(req->assoc);
-		in_options = 0;
-	} else {
-		src_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
-		in_options = LDST_SGF;
-	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
-			  in_options);
-
-	if (contig & GIV_DST_CONTIG) {
-		dst_dma = edesc->iv_dma;
-	} else {
-		if (likely(req->src == req->dst)) {
-			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
-				  (edesc->assoc_nents +
-				   (is_gcm ? 1 + edesc->src_nents : 0));
-			out_options = LDST_SGF;
-		} else {
-			dst_dma = edesc->sec4_sg_dma +
-				  sec4_sg_index *
-				  sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
-	}
-
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
-			   out_options);
+	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+		append_load_as_imm(desc, req->iv, ivsize,
+				   LDST_CLASS_1_CCB |
+				   LDST_SRCDST_BYTE_CONTEXT |
+				   (ivoffset << LDST_OFFSET_SHIFT));
 }

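Worked offsets for the IV load appended by init_authenc_job() above (CTR_RFC3686_NONCE_SIZE is 4 bytes):

	/*
	 * CONTEXT1 byte layout assumed above:
	 *
	 *   plain AES-CTR: [16..31] = IV              -> ivoffset = 16
	 *   RFC3686:       [16..19] = nonce
	 *                  [20..27] = IV              -> ivoffset = 16 + 4 = 20
	 *                  [28..31] = counter, preset to 1 by the shared
	 *                             descriptor (append_load_imm_u32 earlier)
	 */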
 /*
@@ -2389,150 +2153,6 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
 	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
 }

-/*
- * allocate and map the aead extended descriptor
- */
-static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
-					       int desc_bytes,
-					       bool *all_contig_ptr,
-					       bool encrypt)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	int assoc_nents, src_nents, dst_nents = 0;
-	struct aead_edesc *edesc;
-	dma_addr_t iv_dma = 0;
-	int sgc;
-	bool all_contig = true;
-	bool assoc_chained = false, src_chained = false, dst_chained = false;
-	int ivsize = crypto_aead_ivsize(aead);
-	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
-	unsigned int authsize = ctx->authsize;
-	bool is_gcm = false;
-
-	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-
-	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
-		dst_nents = sg_count(req->dst,
-				     req->cryptlen +
-					(encrypt ? authsize : (-authsize)),
-				     &dst_chained);
-	} else {
-		src_nents = sg_count(req->src,
-				     req->cryptlen +
-					(encrypt ? authsize : 0),
-				     &src_chained);
-	}
-
-	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-				 DMA_TO_DEVICE, assoc_chained);
-	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
-	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
-	}
-
-	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, iv_dma)) {
-		dev_err(jrdev, "unable to map IV\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-	      OP_ALG_ALGSEL_AES) &&
-	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-		is_gcm = true;
-
-	/*
-	 * Check if data are contiguous.
-	 * GCM expected input sequence: IV, AAD, text
-	 * All other - expected input sequence: AAD, IV, text
-	 */
-	if (is_gcm)
-		all_contig = (!assoc_nents &&
-			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
-			      !src_nents && sg_dma_address(req->assoc) +
-			      req->assoclen == sg_dma_address(req->src));
-	else
-		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
-			      req->assoclen == iv_dma && !src_nents &&
-			      iv_dma + ivsize == sg_dma_address(req->src));
-	if (!all_contig) {
-		assoc_nents = assoc_nents ? : 1;
-		src_nents = src_nents ? : 1;
-		sec4_sg_len = assoc_nents + 1 + src_nents;
-	}
-
-	sec4_sg_len += dst_nents;
-
-	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
-	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
-	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	edesc->assoc_nents = assoc_nents;
-	edesc->assoc_chained = assoc_chained;
-	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
-	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
-	edesc->iv_dma = iv_dma;
-	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
-			 desc_bytes;
-	*all_contig_ptr = all_contig;
-
-	sec4_sg_index = 0;
-	if (!all_contig) {
-		if (!is_gcm) {
-			sg_to_sec4_sg_len(req->assoc, req->assoclen,
-					  edesc->sec4_sg + sec4_sg_index);
-			sec4_sg_index += assoc_nents;
-		}
-
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-
-		if (is_gcm) {
-			sg_to_sec4_sg_len(req->assoc, req->assoclen,
-					  edesc->sec4_sg + sec4_sg_index);
-			sec4_sg_index += assoc_nents;
-		}
-
-		sg_to_sec4_sg_last(req->src,
-				   src_nents,
-				   edesc->sec4_sg +
-				   sec4_sg_index, 0);
-		sec4_sg_index += src_nents;
-	}
-	if (dst_nents) {
-		sg_to_sec4_sg_last(req->dst, dst_nents,
-				   edesc->sec4_sg + sec4_sg_index, 0);
-	}
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-		dev_err(jrdev, "unable to map S/G table\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return edesc;
-}
-
 /*
  * allocate and map the aead extended descriptor
  */
@@ -2579,8 +2199,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
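The kzalloc() conversion in this hunk is worth a note: sizeof(*edesc) stays correct if the struct is ever renamed, and zeroed memory cannot leak stale kernel data through the DMA-visible descriptor tail. A minimal sketch of the idiom, with a hypothetical demo_edesc type (not the driver's real struct):

#include <linux/slab.h>
#include <linux/err.h>

struct demo_edesc {
	int nents;
	unsigned char tail[];	/* hw descriptor + S/G link table follow */
};

static struct demo_edesc *demo_alloc(int desc_bytes, int sg_bytes,
				     gfp_t flags)
{
	struct demo_edesc *edesc;

	/* one zeroed allocation covers header and variable-length tail */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	return edesc;
}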
@@ -2685,7 +2305,15 @@ static int gcm_encrypt(struct aead_request *req)
 	return ret;
 }

-static int old_aead_encrypt(struct aead_request *req)
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_encrypt(req);
+}
+
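The assoclen check above exists because, under the converted AEAD API, the 8-byte RFC 4106/4543 explicit IV travels as the tail of the associated data, so any request with fewer than 8 AAD bytes is malformed. An illustrative stand-alone version of the same guard (not driver code):

#include <linux/errno.h>

static inline int rfc4106_check_assoclen(unsigned int assoclen)
{
	/* 8 bytes of AAD are reserved for the explicit IV */
	return assoclen < 8 ? -EINVAL : 0;
}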
+static int aead_encrypt(struct aead_request *req)
 {
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2696,14 +2324,13 @@ static int old_aead_encrypt(struct aead_request *req)
 	int ret = 0;

 	/* allocate extended descriptor */
-	edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				     CAAM_CMD_SZ, &all_contig, true);
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);

 	/* Create and submit job descriptor */
-	old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
-			  all_contig, true);
+	init_authenc_job(req, edesc, all_contig, true);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2711,11 +2338,11 @@ static int old_aead_encrypt(struct aead_request *req)
 #endif

 	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		old_aead_unmap(jrdev, edesc, req);
+		aead_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}

@@ -2757,7 +2384,15 @@ static int gcm_decrypt(struct aead_request *req)
 	return ret;
 }

-static int old_aead_decrypt(struct aead_request *req)
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_decrypt(req);
+}
+
+static int aead_decrypt(struct aead_request *req)
 {
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2768,20 +2403,19 @@ static int old_aead_decrypt(struct aead_request *req)
 	int ret = 0;

 	/* allocate extended descriptor */
-	edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				     CAAM_CMD_SZ, &all_contig, false);
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);

 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->cryptlen, 1);
+		       req->assoclen + req->cryptlen, 1);
 #endif

 	/* Create and submit job descriptor*/
-	old_init_aead_job(ctx->sh_desc_dec,
-			  ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+	init_authenc_job(req, edesc, all_contig, false);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2789,49 +2423,58 @@ static int old_aead_decrypt(struct aead_request *req)
 #endif

 	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
+	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		old_aead_unmap(jrdev, edesc, req);
+		aead_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}

 	return ret;
 }

+static int aead_givdecrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+
+	if (req->cryptlen < ivsize)
+		return -EINVAL;
+
+	req->cryptlen -= ivsize;
+	req->assoclen += ivsize;
+
+	return aead_decrypt(req);
+}
+
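aead_givdecrypt() above folds the explicit IV into the associated data before running the normal decrypt: the leading ivsize bytes of the payload were produced by the IV generator on encryption, so they must be authenticated but not deciphered as payload. A stand-alone illustration with assumed sizes (ivsize = 16, as for cbc(aes)):

#include <stdio.h>

int main(void)
{
	unsigned int ivsize = 16, assoclen = 20, cryptlen = 100;

	cryptlen -= ivsize;	/* payload no longer counts the IV...	*/
	assoclen += ivsize;	/* ...which is authenticated as AAD	*/

	printf("assoclen=%u cryptlen=%u\n", assoclen, cryptlen); /* 36 84 */
	return 0;
}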
 /*
- * allocate and map the aead extended descriptor for aead givencrypt
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
-static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
-					       *greq, int desc_bytes,
-					       u32 *contig_ptr)
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, int desc_bytes,
+						       bool *iv_contig_out)
 {
-	struct aead_request *req = &greq->areq;
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	int assoc_nents, src_nents, dst_nents = 0;
-	struct aead_edesc *edesc;
+					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, dst_nents = 0, sec4_sg_bytes;
+	struct ablkcipher_edesc *edesc;
 	dma_addr_t iv_dma = 0;
 	dma_addr_t iv_dma = 0;
+	bool iv_contig = false;
 	int sgc;
-	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
-	int ivsize = crypto_aead_ivsize(aead);
-	bool assoc_chained = false, src_chained = false, dst_chained = false;
-	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
-	bool is_gcm = false;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	bool src_chained = false, dst_chained = false;
-	int sec4_sg_index;

-	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+	src_nents = sg_count(req->src, req->nbytes, &src_chained);

-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
-				     &dst_chained);
+	if (req->dst != req->src)
+		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

-	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-				 DMA_TO_DEVICE, assoc_chained);
 	if (likely(req->src == req->dst)) {
 		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
 					 DMA_BIDIRECTIONAL, src_chained);
@@ -2842,121 +2485,52 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 					 DMA_FROM_DEVICE, dst_chained);
 	}

-	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, iv_dma)) {
 		dev_err(jrdev, "unable to map IV\n");
 		return ERR_PTR(-ENOMEM);
 	}

-	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-	      OP_ALG_ALGSEL_AES) &&
-	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-		is_gcm = true;
-
 	/*
-	 * Check if data are contiguous.
-	 * GCM expected input sequence: IV, AAD, text
-	 * All other - expected input sequence: AAD, IV, text
+	 * Check if iv can be contiguous with source and destination.
+	 * If so, include it. If not, create scatterlist.
 	 */
-
-	if (is_gcm) {
-		if (assoc_nents || iv_dma + ivsize !=
-		    sg_dma_address(req->assoc) || src_nents ||
-		    sg_dma_address(req->assoc) + req->assoclen !=
-		    sg_dma_address(req->src))
-			contig &= ~GIV_SRC_CONTIG;
-	} else {
-		if (assoc_nents ||
-		    sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
-		    src_nents || iv_dma + ivsize != sg_dma_address(req->src))
-			contig &= ~GIV_SRC_CONTIG;
-	}
-
-	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
-		contig &= ~GIV_DST_CONTIG;
-
-	if (!(contig & GIV_SRC_CONTIG)) {
-		assoc_nents = assoc_nents ? : 1;
+	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+		iv_contig = true;
+	else
 		src_nents = src_nents ? : 1;
-		sec4_sg_len += assoc_nents + 1 + src_nents;
-		if (req->src == req->dst &&
-		    (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
-			contig &= ~GIV_DST_CONTIG;
-	}
-
-	/*
-	 * Add new sg entries for GCM output sequence.
-	 * Expected output sequence: IV, encrypted text.
-	 */
-	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
-		sec4_sg_len += 1 + src_nents;
-
-	if (unlikely(req->src != req->dst)) {
-		dst_nents = dst_nents ? : 1;
-		sec4_sg_len += 1 + dst_nents;
-	}
-
-	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+			sizeof(struct sec4_sg_entry);

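The sizing rule above decides whether the IV costs a link-table entry: in this driver's sg_count() convention a count of 0 means one flat segment, and if the mapped IV ends exactly where that segment starts, the engine can read IV plus data as one contiguous buffer. A stand-alone sketch of the two checks:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

static bool iv_is_contig(uint64_t iv_dma, size_t ivsize,
			 uint64_t src_dma, int src_nents)
{
	/* src_nents == 0: single flat segment; IV immediately precedes it */
	return src_nents == 0 && iv_dma + ivsize == src_dma;
}

static size_t sec4_sg_entries(bool iv_contig, int src_nents, int dst_nents)
{
	return (size_t)((iv_contig ? 0 : 1) + src_nents + dst_nents);
}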
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}

-	edesc->assoc_nents = assoc_nents;
-	edesc->assoc_chained = assoc_chained;
 	edesc->src_nents = src_nents;
 	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
 	edesc->dst_chained = dst_chained;
-	edesc->iv_dma = iv_dma;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
-	*contig_ptr = contig;

 	sec4_sg_index = 0;
-	if (!(contig & GIV_SRC_CONTIG)) {
-		if (!is_gcm) {
-			sg_to_sec4_sg_len(req->assoc, req->assoclen,
-					  edesc->sec4_sg + sec4_sg_index);
-			sec4_sg_index += assoc_nents;
-		}
-
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-
-		if (is_gcm) {
-			sg_to_sec4_sg_len(req->assoc, req->assoclen,
-					  edesc->sec4_sg + sec4_sg_index);
-			sec4_sg_index += assoc_nents;
-		}
-
-		sg_to_sec4_sg_last(req->src, src_nents,
-				   edesc->sec4_sg +
-				   sec4_sg_index, 0);
-		sec4_sg_index += src_nents;
-	}
-
-	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
+	if (!iv_contig) {
+		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
 		sg_to_sec4_sg_last(req->src, src_nents,
-				   edesc->sec4_sg + sec4_sg_index, 0);
+				   edesc->sec4_sg + 1, 0);
+		sec4_sg_index += 1 + src_nents;
 	}

-	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
+	if (dst_nents) {
 		sg_to_sec4_sg_last(req->dst, dst_nents,
-				   edesc->sec4_sg + sec4_sg_index, 0);
+			edesc->sec4_sg + sec4_sg_index, 0);
 	}
+
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
@@ -2964,201 +2538,58 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 		return ERR_PTR(-ENOMEM);
 	}

+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
+#endif
+
+	*iv_contig_out = iv_contig;
 	return edesc;
 }

-static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
 {
-	struct aead_request *req = &areq->areq;
-	struct aead_edesc *edesc;
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *jrdev = ctx->jrdev;
-	u32 contig;
+	bool iv_contig;
 	u32 *desc;
 	int ret = 0;

 	/* allocate extended descriptor */
-	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
-				     CAAM_CMD_SZ, &contig);
-
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+				       CAAM_CMD_SZ, &iv_contig);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);

-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->cryptlen, 1);
-#endif
-
 	/* Create and submit job descriptor*/
-	init_aead_giv_job(ctx->sh_desc_givenc,
-			  ctx->sh_desc_givenc_dma, edesc, req, contig);
+	init_ablkcipher_job(ctx->sh_desc_enc,
+		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
-
 	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		old_aead_unmap(jrdev, edesc, req);
+		ablkcipher_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}

 	return ret;
 }

-static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
-{
-	return old_aead_encrypt(&areq->areq);
-}
-
-/*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
- */
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-						       *req, int desc_bytes,
-						       bool *iv_contig_out)
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
 {
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
-		       GFP_KERNEL : GFP_ATOMIC;
-	int src_nents, dst_nents = 0, sec4_sg_bytes;
-	struct ablkcipher_edesc *edesc;
-	dma_addr_t iv_dma = 0;
-	bool iv_contig = false;
-	int sgc;
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	bool src_chained = false, dst_chained = false;
-	int sec4_sg_index;
-
-	src_nents = sg_count(req->src, req->nbytes, &src_chained);
-
-	if (req->dst != req->src)
-		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
-
-	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
-	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
-	}
-
-	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, iv_dma)) {
-		dev_err(jrdev, "unable to map IV\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	/*
-	 * Check if iv can be contiguous with source and destination.
-	 * If so, include it. If not, create scatterlist.
-	 */
-	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
-		iv_contig = true;
-	else
-		src_nents = src_nents ? : 1;
-	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
-			sizeof(struct sec4_sg_entry);
-
-	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
-	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
-	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
-	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
-
-	sec4_sg_index = 0;
-	if (!iv_contig) {
-		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
-		sg_to_sec4_sg_last(req->src, src_nents,
-				   edesc->sec4_sg + 1, 0);
-		sec4_sg_index += 1 + src_nents;
-	}
-
-	if (dst_nents) {
-		sg_to_sec4_sg_last(req->dst, dst_nents,
-			edesc->sec4_sg + sec4_sg_index, 0);
-	}
-
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-		dev_err(jrdev, "unable to map S/G table\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	edesc->iv_dma = iv_dma;
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
-		       sec4_sg_bytes, 1);
-#endif
-
-	*iv_contig_out = iv_contig;
-	return edesc;
-}
-
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
-{
-	struct ablkcipher_edesc *edesc;
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct device *jrdev = ctx->jrdev;
-	bool iv_contig;
-	u32 *desc;
-	int ret = 0;
-
-	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
-				       CAAM_CMD_SZ, &iv_contig);
-	if (IS_ERR(edesc))
-		return PTR_ERR(edesc);
-
-	/* Create and submit job descriptor*/
-	init_ablkcipher_job(ctx->sh_desc_enc,
-		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-		       desc_bytes(edesc->hw_desc), 1);
-#endif
-	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
-
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		ablkcipher_unmap(jrdev, edesc, req);
-		kfree(edesc);
-	}
-
-	return ret;
-}
-
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
-{
-	struct ablkcipher_edesc *edesc;
+	struct ablkcipher_edesc *edesc;
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *jrdev = ctx->jrdev;
@@ -3251,8 +2682,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 			sizeof(struct sec4_sg_entry);

 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(*edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
@@ -3347,7 +2778,6 @@ struct caam_alg_template {
 	u32 type;
 	union {
 		struct ablkcipher_alg ablkcipher;
-		struct old_aead_alg aead;
 	} template_u;
 	u32 class1_alg_type;
 	u32 class2_alg_type;
@@ -3355,753 +2785,1426 @@ struct caam_alg_template {
 };

 static struct caam_alg_template driver_algs[] = {
-	/* single-pass ipsec_esp descriptor */
-	{
-		.name = "authenc(hmac(md5),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
-			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
-	},
+	/* ablkcipher descriptor */
 	{
-		.name = "authenc(hmac(sha1),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
 			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 	},
 	{
-		.name = "authenc(hmac(sha224),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
 			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA224_DIGEST_SIZE,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 	},
 	{
-		.name = "authenc(hmac(sha256),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
 			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
 			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 	},
 	{
-		.name = "authenc(hmac(sha384),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA384_DIGEST_SIZE,
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "chainiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
 	},
 	{
-		.name = "authenc(hmac(sha512),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
+		.name = "rfc3686(ctr(aes))",
+		.driver_name = "rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
 			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
 			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-	},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	}
+};
+
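From here on the AEAD algorithms move out of caam_alg_template into their own table. An abridged sketch of the wrapper type these entries populate; the field names follow their use in this diff (container_of(..., struct caam_aead_alg, aead) and the .caam initializers), while the authoritative definition lives elsewhere in caamalg.c:

struct caam_aead_alg {
	struct aead_alg aead;		/* generic crypto API entry points */
	struct caam_alg_entry caam;	/* class1/class2 opcodes, alg_op, and
					 * the rfc3686/geniv flags that the
					 * shared-descriptor builders key off */
};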
+static struct caam_aead_alg driver_aeads[] = {
 	{
-		.name = "authenc(hmac(md5),cbc(aes))",
-		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
+		.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-caam",
+				.cra_blocksize = 1,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.setkey = rfc4106_setkey,
+			.setauthsize = rfc4106_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
 	},
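The rfc4106 template above advertises an ivsize of 8 even though GCM consumes a 12-byte nonce: RFC 4106 builds the nonce from a 4-byte salt carried with the key plus the 8-byte explicit IV carried per packet. A small illustrative helper (not from this patch):

#include <stdint.h>
#include <string.h>

static void rfc4106_nonce(uint8_t nonce[12], const uint8_t salt[4],
			  const uint8_t explicit_iv[8])
{
	memcpy(nonce, salt, 4);			/* salt kept with the key */
	memcpy(nonce + 4, explicit_iv, 8);	/* per-packet explicit IV */
}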
 	{
-		.name = "authenc(hmac(sha1),cbc(aes))",
-		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "rfc4543(gcm(aes))",
+				.cra_driver_name = "rfc4543-gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4543_setkey,
+			.setauthsize = rfc4543_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* Galois Counter Mode */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = gcm_setkey,
+			.setauthsize = gcm_setauthsize,
+			.encrypt = gcm_encrypt,
+			.decrypt = gcm_decrypt,
+			.ivsize = 12,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* single-pass ipsec_esp descriptor */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
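The echainiv templates register with .decrypt = aead_givdecrypt and .caam.geniv = true: on encryption the IV is generated internally and written ahead of the ciphertext, so decryption has to peel it back off. A hedged usage sketch through the ordinary AEAD API (standard crypto-API calls, not part of this patch):

#include <crypto/aead.h>
#include <linux/err.h>

static int demo_grab_echainiv_tfm(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("echainiv(authenc(hmac(sha1),cbc(aes)))",
				0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* crypto_aead_ivsize(tfm) == AES_BLOCK_SIZE; the generated IV is
	 * carried in front of the ciphertext on the wire */
	crypto_free_aead(tfm);
	return 0;
}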
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha224),cbc(aes))",
-		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha256),cbc(aes))",
-		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha384),cbc(aes))",
-		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
-
 	{
-		.name = "authenc(hmac(sha512),cbc(aes))",
-		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(md5),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		}
 	},
 	{
-		.name = "authenc(hmac(sha1),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha224),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha256),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha384),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha512),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(md5),cbc(des))",
-		.driver_name = "authenc-hmac-md5-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
 			},
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha1),cbc(des))",
-		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
 			},
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha224),cbc(des))",
-		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
 			},
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha256),cbc(des))",
-		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
 			},
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha384),cbc(des))",
-		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
 			},
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha512),cbc(des))",
-		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(md5),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-md5-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA224_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(sha1),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA384_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
 	},
 	{
-		.name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(sha224),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-	},
-	/* ablkcipher descriptor */
-	{
-		.name = "cbc(aes)",
-		.driver_name = "cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
-			.givencrypt = ablkcipher_givencrypt,
-			.geniv = "<built-in>",
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "cbc(des3_ede)",
-		.driver_name = "cbc-3des-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
-			.givencrypt = ablkcipher_givencrypt,
-			.geniv = "<built-in>",
-			.min_keysize = DES3_EDE_KEY_SIZE,
-			.max_keysize = DES3_EDE_KEY_SIZE,
-			.ivsize = DES3_EDE_BLOCK_SIZE,
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
 	},
 	{
-		.name = "cbc(des)",
-		.driver_name = "cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
-			.givencrypt = ablkcipher_givencrypt,
-			.geniv = "<built-in>",
-			.min_keysize = DES_KEY_SIZE,
-			.max_keysize = DES_KEY_SIZE,
-			.ivsize = DES_BLOCK_SIZE,
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha256),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
 	},
 	{
-		.name = "ctr(aes)",
-		.driver_name = "ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
-			.geniv = "chainiv",
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
 			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-	},
-	{
-		.name = "rfc3686(ctr(aes))",
-		.driver_name = "rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
-			.givencrypt = ablkcipher_givencrypt,
-			.geniv = "<built-in>",
-			.min_keysize = AES_MIN_KEY_SIZE +
-				       CTR_RFC3686_NONCE_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE +
-				       CTR_RFC3686_NONCE_SIZE,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-	}
-};
-
-struct caam_alg_entry {
-	int class1_alg_type;
-	int class2_alg_type;
-	int alg_op;
-};
-
-struct caam_aead_alg {
-	struct aead_alg aead;
-	struct caam_alg_entry caam;
-	bool registered;
-};
-
-static struct caam_aead_alg driver_aeads[] = {
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
+	},
 	{
 		.aead = {
 			.base = {
-				.cra_name = "rfc4106(gcm(aes))",
-				.cra_driver_name = "rfc4106-gcm-aes-caam",
+				.cra_name = "seqiv(authenc(hmac(sha384),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
+						   "rfc3686-ctr-aes-caam",
 				.cra_blocksize = 1,
 			},
-			.setkey = rfc4106_setkey,
-			.setauthsize = rfc4106_setauthsize,
-			.encrypt = gcm_encrypt,
-			.decrypt = gcm_decrypt,
-			.ivsize = 8,
-			.maxauthsize = AES_BLOCK_SIZE,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
 		.caam = {
-			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
 		},
 	},
 	{
 		.aead = {
 			.base = {
-				.cra_name = "rfc4543(gcm(aes))",
-				.cra_driver_name = "rfc4543-gcm-aes-caam",
+				.cra_name = "authenc(hmac(sha512),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "rfc3686-ctr-aes-caam",
 				.cra_blocksize = 1,
 			},
-			.setkey = rfc4543_setkey,
-			.setauthsize = rfc4543_setauthsize,
-			.encrypt = gcm_encrypt,
-			.decrypt = gcm_decrypt,
-			.ivsize = 8,
-			.maxauthsize = AES_BLOCK_SIZE,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
 		.caam = {
-			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
 		},
 	},
-	/* Galois Counter Mode */
 	{
 		.aead = {
 			.base = {
-				.cra_name = "gcm(aes)",
-				.cra_driver_name = "gcm-aes-caam",
+				.cra_name = "seqiv(authenc(hmac(sha512),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
+						   "rfc3686-ctr-aes-caam",
 				.cra_blocksize = 1,
 			},
-			.setkey = gcm_setkey,
-			.setauthsize = gcm_setauthsize,
-			.encrypt = gcm_encrypt,
-			.decrypt = gcm_decrypt,
-			.ivsize = 12,
-			.maxauthsize = AES_BLOCK_SIZE,
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
 		.caam = {
-			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
 		},
 	},
 };
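
The table above is the complete set of authenc/echainiv/seqiv templates the driver now exposes through the single AEAD interface. For orientation, a minimal kernel-side user is sketched below; the chosen cra_name is one of the entries above, but the function itself is illustrative, and a real authenc() caller must first pack its cipher and digest keys in the rtattr-based authenc key format before calling crypto_aead_setkey().

#include <crypto/aead.h>
#include <crypto/sha.h>

static int caam_aead_smoke_test(void)
{
	struct crypto_aead *tfm;
	int err;

	/* resolves to authenc-hmac-sha256-cbc-des3_ede-caam when present */
	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(des3_ede))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* a truncated tag is allowed up to .maxauthsize */
	err = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE / 2);

	crypto_free_aead(tfm);
	return err;
}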
@@ -4211,7 +4314,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 	struct caam_crypto_alg *t_alg;
 	struct crypto_alg *alg;
 
-	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
 	if (!t_alg) {
 		pr_err("failed to allocate t_alg\n");
 		return ERR_PTR(-ENOMEM);
@@ -4240,10 +4343,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 		alg->cra_type = &crypto_ablkcipher_type;
 		alg->cra_ablkcipher = template->template_ablkcipher;
 		break;
-	case CRYPTO_ALG_TYPE_AEAD:
-		alg->cra_type = &crypto_aead_type;
-		alg->cra_aead = template->template_aead;
-		break;
 	}
 
 	t_alg->caam.class1_alg_type = template->class1_alg_type;
@@ -4271,8 +4370,10 @@ static int __init caam_algapi_init(void)
 	struct device_node *dev_node;
 	struct platform_device *pdev;
 	struct device *ctrldev;
-	void *priv;
+	struct caam_drv_private *priv;
 	int i = 0, err = 0;
+	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -4302,16 +4403,39 @@ static int __init caam_algapi_init(void)
 
 	INIT_LIST_HEAD(&alg_list);
 
-	/* register crypto algorithms the device supports */
+	/*
+	 * Register crypto algorithms the device supports.
+	 * First, detect presence and attributes of DES, AES, and MD blocks.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+	/* If MD is present, limit digest size based on LP256 */
+	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+		md_limit = SHA256_DIGEST_SIZE;
+
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-		/* TODO: check if h/w supports alg */
 		struct caam_crypto_alg *t_alg;
+		struct caam_alg_template *alg = driver_algs + i;
+		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+				continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+				continue;
 
-		t_alg = caam_alg_alloc(&driver_algs[i]);
+		t_alg = caam_alg_alloc(alg);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n",
-				driver_algs[i].driver_name);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
 			continue;
 		}
 
@@ -4329,6 +4453,37 @@ static int __init caam_algapi_init(void)
 
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 		struct caam_aead_alg *t_alg = driver_aeads + i;
+		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+				continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+				continue;
+
+		/*
+		 * Check support for AES algorithms not available
+		 * on LP devices.
+		 */
+		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+			if (alg_aai == OP_ALG_AAI_GCM)
+				continue;
+
+		/*
+		 * Skip algorithms requiring message digests
+		 * if MD or MD size is not supported by device.
+		 */
+		if (c2_alg_sel &&
+		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+				continue;
 
 		caam_aead_alg_init(t_alg);
 

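The caam_algapi_init() hunks above replace the old "TODO: check if h/w supports alg" with real capability gating read out of the perfmon block. Condensed into one helper (the function name is hypothetical; the masks, shifts, and OP_ALG_ALGSEL_* selectors are the driver's own), the per-entry test is:

static bool caam_cha_supports(u32 cha_inst, u32 alg_sel)
{
	/* per-CHA instantiation counts live in the CHA_NUM_LS register */
	u32 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	u32 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;

	if (!des_inst && (alg_sel == OP_ALG_ALGSEL_DES ||
			  alg_sel == OP_ALG_ALGSEL_3DES))
		return false;	/* no DES block instantiated */

	if (!aes_inst && alg_sel == OP_ALG_ALGSEL_AES)
		return false;	/* no AES block instantiated */

	return true;
}

The AEAD loop applies the same idea twice (class 1 for the cipher, class 2 for the digest), and additionally rejects GCM on LP-era AES blocks and any digest wider than the md_limit derived from CHA_ID_LS_MD_LP256.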
+ 45 - 24
drivers/crypto/caam/caamhash.c

@@ -127,7 +127,7 @@ struct caam_hash_state {
 	int buflen_0;
 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
 	int buflen_1;
-	u8 caam_ctx[MAX_CTX_LEN];
+	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
 	int (*update)(struct ahash_request *req);
 	int (*final)(struct ahash_request *req);
 	int (*finup)(struct ahash_request *req);
@@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
 				sec4_sg_bytes, GFP_DMA | flags);
 		if (!edesc) {
 			dev_err(jrdev,
@@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
 							edesc->sec4_sg + 1,
 							buf, state->buf_dma,
-							*buflen, last_buflen);
+							*next_buflen, *buflen);
 
 		if (src_nents) {
 			src_map_to_sec4_sg(jrdev, req->src, src_nents,
@@ -919,8 +919,8 @@ static int ahash_final_ctx(struct ahash_request *req)
 	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1006,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
 			 sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1092,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req)
 	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
-			DESC_JOB_IO_LEN, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1166,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	int sh_len;
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
-			GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1246,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
 				sec4_sg_bytes, GFP_DMA | flags);
 		if (!edesc) {
 			dev_err(jrdev,
@@ -1354,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 			 sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1449,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req)
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
 				sec4_sg_bytes, GFP_DMA | flags);
 		if (!edesc) {
 			dev_err(jrdev,
@@ -1843,7 +1842,7 @@ caam_hash_alloc(struct caam_hash_template *template,
 	struct ahash_alg *halg;
 	struct crypto_alg *alg;
 
-	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
 	if (!t_alg) {
 		pr_err("failed to allocate t_alg\n");
 		return ERR_PTR(-ENOMEM);
@@ -1885,8 +1884,10 @@ static int __init caam_algapi_hash_init(void)
 	struct device_node *dev_node;
 	struct platform_device *pdev;
 	struct device *ctrldev;
-	void *priv;
 	int i = 0, err = 0;
+	struct caam_drv_private *priv;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
+	u32 cha_inst, cha_vid;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
 	if (!dev_node) {
@@ -1912,19 +1913,40 @@ static int __init caam_algapi_hash_init(void)
 	if (!priv)
 		return -ENODEV;
 
+	/*
+	 * Register crypto algorithms the device supports.  First, identify
+	 * presence and attributes of MD block.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+
+	/*
+	 * Skip registration of any hashing algorithms if MD block
+	 * is not present.
+	 */
+	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+		return -ENODEV;
+
+	/* Limit digest size based on LP256 */
+	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+		md_limit = SHA256_DIGEST_SIZE;
+
 	INIT_LIST_HEAD(&hash_list);
 
 	/* register crypto algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
-		/* TODO: check if h/w supports alg */
 		struct caam_hash_alg *t_alg;
+		struct caam_hash_template *alg = driver_hash + i;
+
+		/* If MD size is not supported by device, skip registration */
+		if (alg->template_ahash.halg.digestsize > md_limit)
+			continue;
 
 		/* register hmac version */
-		t_alg = caam_hash_alloc(&driver_hash[i], true);
+		t_alg = caam_hash_alloc(alg, true);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n",
-				driver_hash[i].driver_name);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
 			continue;
 		}
 
@@ -1937,11 +1959,10 @@ static int __init caam_algapi_hash_init(void)
 			list_add_tail(&t_alg->entry, &hash_list);
 
 		/* register unkeyed version */
-		t_alg = caam_hash_alloc(&driver_hash[i], false);
+		t_alg = caam_hash_alloc(alg, false);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n",
-				driver_hash[i].driver_name);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
 			continue;
 		}
 

+ 21 - 5
drivers/crypto/caam/caamrng.c

@@ -108,6 +108,10 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
 
 	atomic_set(&bd->empty, BUF_NOT_EMPTY);
 	complete(&bd->filled);
+
+	/* Buffer refilled, invalidate cache */
+	dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
@@ -311,7 +315,7 @@ static int __init caam_rng_init(void)
 	struct device_node *dev_node;
 	struct platform_device *pdev;
 	struct device *ctrldev;
-	void *priv;
+	struct caam_drv_private *priv;
 	int err;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -338,20 +342,32 @@ static int __init caam_rng_init(void)
 	if (!priv)
 		return -ENODEV;
 
+	/* Check for an instantiated RNG before registration */
+	if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+		return -ENODEV;
+
 	dev = caam_jr_alloc();
 	if (IS_ERR(dev)) {
 		pr_err("Job Ring Device allocation for transform failed\n");
 		pr_err("Job Ring Device allocation for transform failed\n");
 		return PTR_ERR(dev);
 		return PTR_ERR(dev);
 	}
-	rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
-	if (!rng_ctx)
-		return -ENOMEM;
+	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+	if (!rng_ctx) {
+		err = -ENOMEM;
+		goto free_caam_alloc;
+	}
 	err = caam_init_rng(rng_ctx, dev);
 	if (err)
-		return err;
+		goto free_rng_ctx;
 
 	dev_info(dev, "registering rng-caam\n");
 	return hwrng_register(&caam_rng);
+
+free_rng_ctx:
+	kfree(rng_ctx);
+free_caam_alloc:
+	caam_jr_free(dev);
+	return err;
 }
 
 module_init(caam_rng_init);

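The rng_done() change above pairs the hardware's buffer refill with a dma_sync_single_for_cpu() so stale cache lines are invalidated before the CPU reads the fresh random bytes. The general ownership handshake for such a streaming DMA buffer, sketched here with illustrative names, is:

static void consume_filled_buffer(struct device *dev, dma_addr_t addr,
				  const void *buf, size_t len)
{
	/* device finished writing: hand the buffer back to the CPU */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

	/* ... read len bytes from buf here ... */

	/* return ownership to the device before it refills the buffer */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}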
+ 1 - 0
drivers/crypto/caam/compat.h

@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/debugfs.h>
 #include <linux/circ_buf.h>
+#include <linux/clk.h>
 #include <net/xfrm.h>
 
 #include <crypto/algapi.h>

+ 128 - 26
drivers/crypto/caam/ctrl.c

@@ -15,6 +15,24 @@
 #include "desc_constr.h"
 #include "desc_constr.h"
 #include "error.h"
 #include "error.h"
 
+/*
+ * i.MX targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device.
+ */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+						char *clk_name)
+{
+	return devm_clk_get(dev, clk_name);
+}
+#else
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+						char *clk_name)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Descriptor to instantiate RNG State Handle 0 in normal mode and
  * load the JDKEK, TDKEK and TDSK registers
@@ -121,7 +139,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 		flags |= DECO_JQCR_FOUR;
 
 	/* Instruct the DECO to execute it */
-	wr_reg32(&deco->jr_ctl_hi, flags);
+	setbits32(&deco->jr_ctl_hi, flags);
 
 	timeout = 10000000;
 	do {
@@ -175,7 +193,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
 {
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
 	struct caam_ctrl __iomem *ctrl;
-	u32 *desc, status, rdsta_val;
+	u32 *desc, status = 0, rdsta_val;
 	int ret = 0, sh_idx;
 
 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
@@ -207,7 +225,8 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
 		 * CAAM eras), then try again.
 		 */
 		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
-		if (status || !(rdsta_val & (1 << sh_idx)))
+		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+		    !(rdsta_val & (1 << sh_idx)))
 			ret = -EAGAIN;
 		if (ret)
 			break;
@@ -279,7 +298,7 @@ static int caam_remove(struct platform_device *pdev)
 	struct device *ctrldev;
 	struct caam_drv_private *ctrlpriv;
 	struct caam_ctrl __iomem *ctrl;
-	int ring, ret = 0;
+	int ring;
 
 	ctrldev = &pdev->dev;
 	ctrlpriv = dev_get_drvdata(ctrldev);
@@ -303,7 +322,13 @@ static int caam_remove(struct platform_device *pdev)
 	/* Unmap controller region */
 	iounmap(ctrl);
 
-	return ret;
+	/* shut clocks off before finalizing shutdown */
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	clk_disable_unprepare(ctrlpriv->caam_mem);
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+
+	return 0;
 }
 
 /*
@@ -370,14 +395,14 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 int caam_get_era(void)
 {
 	struct device_node *caam_node;
-	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
-		const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
-				"fsl,sec-era",
-				NULL);
-		return prop ? *prop : -ENOTSUPP;
-	}
+	int ret;
+	u32 prop;
 
-	return -ENOTSUPP;
+	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
+	of_node_put(caam_node);
+
+	return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
 }
 EXPORT_SYMBOL(caam_get_era);
 
@@ -390,6 +415,7 @@ static int caam_probe(struct platform_device *pdev)
 	struct device_node *nprop, *np;
 	struct caam_ctrl __iomem *ctrl;
 	struct caam_drv_private *ctrlpriv;
+	struct clk *clk;
 #ifdef CONFIG_DEBUG_FS
 	struct caam_perfmon *perfmon;
 #endif
@@ -398,8 +424,7 @@ static int caam_probe(struct platform_device *pdev)
 	int pg_size;
 	int BLOCK_OFFSET = 0;
 
-	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
-				GFP_KERNEL);
+	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
 	if (!ctrlpriv)
 		return -ENOMEM;
 
@@ -408,12 +433,76 @@ static int caam_probe(struct platform_device *pdev)
 	ctrlpriv->pdev = pdev;
 	nprop = pdev->dev.of_node;
 
+	/* Enable clocking */
+	clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM ipg clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_ipg = clk;
+
+	clk = caam_drv_identify_clk(&pdev->dev, "mem");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM mem clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_mem = clk;
+
+	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM aclk clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_aclk = clk;
+
+	clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM emi_slow clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_emi_slow = clk;
+
+	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_mem);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+			ret);
+		goto disable_caam_ipg;
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
+		goto disable_caam_mem;
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+			ret);
+		goto disable_caam_aclk;
+	}
+
 	/* Get configuration properties from device tree */
 	/* First, get register page */
 	ctrl = of_iomap(nprop, 0);
 	if (ctrl == NULL) {
 		dev_err(dev, "caam: of_iomap() failed\n");
 		dev_err(dev, "caam: of_iomap() failed\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto disable_caam_emi_slow;
 	}
 	/* Finding the page size for using the CTPR_MS register */
 	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
@@ -444,8 +533,9 @@ static int caam_probe(struct platform_device *pdev)
 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
 	 * long pointers in master configuration register
 	 */
 	 */
-	setbits32(&ctrl->mcr, MCFGR_WDENABLE |
-		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+	clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
+		      MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
+					MCFGR_LONG_PTR : 0));
 
 	/*
 	 *  Read the Compile Time parameters and SCFGR to determine
@@ -492,12 +582,11 @@ static int caam_probe(struct platform_device *pdev)
 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
 			rspec++;
 
-	ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
-					sizeof(struct platform_device *) * rspec,
-					GFP_KERNEL);
+	ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
+					sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
 	if (ctrlpriv->jrpdev == NULL) {
-		iounmap(ctrl);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto iounmap_ctrl;
 	}
 
 	ring = 0;
@@ -537,8 +626,8 @@ static int caam_probe(struct platform_device *pdev)
 	/* If no QI and no rings specified, quit and go home */
 	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
 		dev_err(dev, "no queues configured, terminating\n");
-		caam_remove(pdev);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto caam_remove;
 	}
 
 	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
@@ -595,8 +684,7 @@ static int caam_probe(struct platform_device *pdev)
 		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
 		if (ret) {
 			dev_err(dev, "failed to instantiate RNG");
-			caam_remove(pdev);
-			return ret;
+			goto caam_remove;
 		}
 		/*
 		 * Set handles init'ed by this module as the complement of the
@@ -700,6 +788,20 @@ static int caam_probe(struct platform_device *pdev)
 						 &ctrlpriv->ctl_tdsk_wrap);
 #endif
 	return 0;
+
+caam_remove:
+	caam_remove(pdev);
+iounmap_ctrl:
+	iounmap(ctrl);
+disable_caam_emi_slow:
+	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+disable_caam_aclk:
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+disable_caam_mem:
+	clk_disable_unprepare(ctrlpriv->caam_mem);
+disable_caam_ipg:
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	return ret;
 }
 
 static struct of_device_id caam_match[] = {

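The probe rework above follows the standard acquire-then-unwind pattern: every clock is looked up first, then enabled in order, and the new goto ladder releases resources in exact reverse order on any failure. A minimal sketch of that shape, assuming a managed devm_clk_get() lookup (the patch itself goes through its own caam_drv_identify_clk() helper; the probe body here is illustrative):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *ipg, *mem;
	int ret;

	ipg = devm_clk_get(&pdev->dev, "ipg");	/* managed lookup, no put needed */
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	mem = devm_clk_get(&pdev->dev, "mem");
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = clk_prepare_enable(ipg);		/* enable in acquisition order... */
	if (ret)
		return ret;

	ret = clk_prepare_enable(mem);
	if (ret)
		goto disable_ipg;		/* ...and unwind in reverse */

	return 0;

disable_ipg:
	clk_disable_unprepare(ipg);
	return ret;
}
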
+ 20 - 3
drivers/crypto/caam/desc.h

@@ -8,12 +8,29 @@
 #ifndef DESC_H
 #define DESC_H
 
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL-compile-time,
+ * and this selection is visible in the Compile Time Parameters Register
+ */
+
+#define SEC4_SG_LEN_EXT		0x80000000	/* Entry points to table */
+#define SEC4_SG_LEN_FIN		0x40000000	/* Last ent in table */
+#define SEC4_SG_BPID_MASK	0x000000ff
+#define SEC4_SG_BPID_SHIFT	16
+#define SEC4_SG_LEN_MASK	0x3fffffff	/* Excludes EXT and FINAL */
+#define SEC4_SG_OFFS_MASK	0x00001fff
+
 struct sec4_sg_entry {
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+	u32 rsvd1;
+	dma_addr_t ptr;
+#else
 	u64 ptr;
-#define SEC4_SG_LEN_FIN 0x40000000
-#define SEC4_SG_LEN_EXT 0x80000000
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
 	u32 len;
-	u8 reserved;
+	u8 rsvd2;
 	u8 buf_pool_id;
 	u16 offset;
 };

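The new SEC4_SG_* masks make the layout of the 32-bit len word explicit: bit 31 flags an extension entry, bit 30 the final entry, and the low 30 bits carry the length. A plain, standalone C sketch of decoding that word (not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEC4_SG_LEN_EXT		0x80000000u	/* entry points to a table */
#define SEC4_SG_LEN_FIN		0x40000000u	/* last entry in the table */
#define SEC4_SG_LEN_MASK	0x3fffffffu	/* excludes EXT and FINAL */

int main(void)
{
	uint32_t len_word = SEC4_SG_LEN_FIN | 4096;	/* a final 4 KiB entry */
	bool is_ext = len_word & SEC4_SG_LEN_EXT;
	bool is_final = len_word & SEC4_SG_LEN_FIN;
	uint32_t len = len_word & SEC4_SG_LEN_MASK;

	printf("ext=%d final=%d len=%u\n", is_ext, is_final, len);
	return 0;
}
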
+ 1 - 1
drivers/crypto/caam/desc_constr.h

@@ -367,7 +367,7 @@ do { \
 	if (upper) \
 		append_u64(desc, data); \
 	else \
-		append_u32(desc, data); \
+		append_u32(desc, lower_32_bits(data)); \
 } while (0)
 
 #define append_math_add_imm_u64(desc, dest, src0, src1, data) \

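lower_32_bits() (from include/linux/kernel.h) makes the u64-to-u32 truncation explicit instead of relying on implicit narrowing in the macro expansion. What it does, spelled out in standalone C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t data = 0x123456789abcdef0ULL;
	uint32_t lo = (uint32_t)(data & 0xffffffffULL);	/* == lower_32_bits(data) */

	printf("0x%x\n", lo);	/* prints 0x9abcdef0 */
	return 0;
}
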
+ 5 - 0
drivers/crypto/caam/intern.h

@@ -91,6 +91,11 @@ struct caam_drv_private {
 				   Handles of the RNG4 block are initialized
 				   by this driver */
 
+	struct clk *caam_ipg;
+	struct clk *caam_mem;
+	struct clk *caam_aclk;
+	struct clk *caam_emi_slow;
+
 	/*
 	 * debugfs entries for developer view into driver/device
 	 * variables at runtime.

+ 23 - 7
drivers/crypto/caam/jr.c

@@ -202,6 +202,13 @@ static void caam_jr_dequeue(unsigned long devarg)
 		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
 		userstatus = jrp->outring[hw_idx].jrstatus;
 
+		/*
+		 * Make sure all information from the job has been obtained
+		 * before telling CAAM that the job has been removed from the
+		 * output ring.
+		 */
+		mb();
+
 		/* set done */
 		wr_reg32(&jrp->rregs->outring_rmvd, 1);
 
@@ -351,12 +358,23 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 
 	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
 
+	/*
+	 * Guarantee that the descriptor's DMA address has been written to
+	 * the next slot in the ring before the write index is updated, since
+	 * other cores may update this index independently.
+	 */
 	smp_wmb();
 
 	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
 				    (JOBR_DEPTH - 1);
 	jrp->head = (head + 1) & (JOBR_DEPTH - 1);
 
+	/*
+	 * Ensure that all job information has been written before
+	 * notifying CAAM that a new job was added to the input ring.
+	 */
+	wmb();
+
 	wr_reg32(&jrp->rregs->inpring_jobadd, 1);
 
 	spin_unlock_bh(&jrp->inplock);
@@ -392,18 +410,17 @@ static int caam_jr_init(struct device *dev)
 		goto out_free_irq;
 
 	error = -ENOMEM;
-	jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-					  &inpbusaddr, GFP_KERNEL);
+	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
+					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
 	if (!jrp->inpring)
 		goto out_free_irq;
 
-	jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
+	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
 					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
 	if (!jrp->outring)
 		goto out_free_inpring;
 
-	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
-			       GFP_KERNEL);
+	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
 	if (!jrp->entinfo)
 		goto out_free_outring;
 
@@ -461,8 +478,7 @@ static int caam_jr_probe(struct platform_device *pdev)
 	int error;
 
 	jrdev = &pdev->dev;
-	jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
-			      GFP_KERNEL);
+	jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
 	if (!jrpriv)
 		return -ENOMEM;
 

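The two barriers added to caam_jr_enqueue() encode a classic producer protocol: publish the payload, then the index (ordered against other CPUs with smp_wmb()), then ring the doorbell (ordered against the MMIO write with wmb()). A sketch of the same shape on an illustrative ring — not the actual CAAM job-ring structures:

#include <asm/barrier.h>
#include <linux/io.h>

struct ring {
	u64 *slots;
	unsigned int head;
	unsigned int depth;		/* power of two */
	void __iomem *doorbell;
};

static void ring_produce(struct ring *r, u64 desc_dma)
{
	r->slots[r->head] = desc_dma;	/* 1. publish the payload */

	smp_wmb();			/* 2. payload before index, vs other CPUs */
	r->head = (r->head + 1) & (r->depth - 1);

	wmb();				/* 3. everything before the MMIO doorbell */
	writel(1, r->doorbell);		/* 4. tell the device */
}
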
+ 57 - 7
drivers/crypto/caam/regs.h

@@ -65,9 +65,31 @@
  *
  */
 
+#ifdef CONFIG_ARM
+/* These are common macros for Power, put here for ARM */
+#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
+#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+
+#define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
+#define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))
+
+#define out_le32(a, v)	out_arch(l, le32, a, v)
+#define in_le32(a)	in_arch(l, le32, a)
+
+#define out_be32(a, v)	out_arch(l, be32, a, v)
+#define in_be32(a)	in_arch(l, be32, a)
+
+#define clrsetbits(type, addr, clear, set) \
+	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+#endif
+
 #ifdef __BIG_ENDIAN
 #define wr_reg32(reg, data) out_be32(reg, data)
 #define rd_reg32(reg) in_be32(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set)
 #ifdef CONFIG_64BIT
 #define wr_reg64(reg, data) out_be64(reg, data)
 #define rd_reg64(reg) in_be64(reg)
@@ -76,6 +98,7 @@
 #ifdef __LITTLE_ENDIAN
 #define wr_reg32(reg, data) __raw_writel(data, reg)
 #define rd_reg32(reg) __raw_readl(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
 #ifdef CONFIG_64BIT
 #define wr_reg64(reg, data) __raw_writeq(data, reg)
 #define rd_reg64(reg) __raw_readq(reg)
@@ -85,20 +108,31 @@
 
 /*
  * The only user of these wr/rd_reg64 functions is the Job Ring (JR).
- * The DMA address registers in the JR are a pair of 32-bit registers.
- * The layout is:
+ * The DMA address registers in the JR are handled differently depending on
+ * platform:
+ *
+ * 1. All BE CAAM platforms and i.MX platforms (LE CAAM):
  *
  *    base + 0x0000 : most-significant 32 bits
  *    base + 0x0004 : least-significant 32 bits
  *
  * The 32-bit version of this core therefore has to write to base + 0x0004
- * to set the 32-bit wide DMA address. This seems to be independent of the
- * endianness of the written/read data.
+ * to set the 32-bit wide DMA address.
+ *
+ * 2. All other LE CAAM platforms (LS1021A etc.)
+ *    base + 0x0000 : least-significant 32 bits
+ *    base + 0x0004 : most-significant 32 bits
  */
 
 #ifndef CONFIG_64BIT
+#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
+	defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
 #define REG64_MS32(reg) ((u32 __iomem *)(reg))
 #define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
+#else
+#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
+#define REG64_LS32(reg) ((u32 __iomem *)(reg))
+#endif
 
 static inline void wr_reg64(u64 __iomem *reg, u64 data)
 {
@@ -133,18 +167,28 @@ struct jr_outentry {
 #define CHA_NUM_MS_DECONUM_SHIFT	24
 #define CHA_NUM_MS_DECONUM_MASK	(0xfull << CHA_NUM_MS_DECONUM_SHIFT)
 
-/* CHA Version IDs */
+/*
+ * CHA version IDs / instantiation bitfields
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
 #define CHA_ID_LS_AES_SHIFT	0
-#define CHA_ID_LS_AES_MASK		(0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_MASK	(0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_LP	(0x3ull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_HP	(0x4ull << CHA_ID_LS_AES_SHIFT)
 
 #define CHA_ID_LS_DES_SHIFT	4
-#define CHA_ID_LS_DES_MASK		(0xfull << CHA_ID_LS_DES_SHIFT)
+#define CHA_ID_LS_DES_MASK	(0xfull << CHA_ID_LS_DES_SHIFT)
 
 #define CHA_ID_LS_ARC4_SHIFT	8
 #define CHA_ID_LS_ARC4_MASK	(0xfull << CHA_ID_LS_ARC4_SHIFT)
 
 #define CHA_ID_LS_MD_SHIFT	12
 #define CHA_ID_LS_MD_MASK	(0xfull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP256	(0x0ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP512	(0x1ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_HP		(0x2ull << CHA_ID_LS_MD_SHIFT)
 
 #define CHA_ID_LS_RNG_SHIFT	16
 #define CHA_ID_LS_RNG_MASK	(0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -395,10 +439,16 @@ struct caam_ctrl {
 /* AXI read cache control */
 #define MCFGR_ARCACHE_SHIFT	12
 #define MCFGR_ARCACHE_MASK	(0xf << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_BUFF	(0x1 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_CACH	(0x2 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_RALL	(0x4 << MCFGR_ARCACHE_SHIFT)
 
 /* AXI write cache control */
 #define MCFGR_AWCACHE_SHIFT	8
 #define MCFGR_AWCACHE_MASK	(0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_BUFF	(0x1 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_CACH	(0x2 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_WALL	(0x8 << MCFGR_AWCACHE_SHIFT)
 
 /* AXI pipeline depth */
 #define MCFGR_AXIPIPE_SHIFT	4

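On 32-bit kernels a 64-bit JR register is written as two 32-bit halves, and REG64_MS32()/REG64_LS32() pick which half lives at offset 0 for the platform. The wr_reg64() whose signature appears in the hunk reduces to the following (using the wr_reg32() and REG64_* macros defined earlier in this file):

static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
	wr_reg32(REG64_MS32(reg), data >> 32);	/* most-significant half */
	wr_reg32(REG64_LS32(reg), data);	/* least-significant half */
}
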
+ 17 - 8
drivers/crypto/caam/sg_sw_sec4.h

@@ -15,7 +15,6 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
 {
 	sec4_sg_ptr->ptr = dma;
 	sec4_sg_ptr->len = len;
-	sec4_sg_ptr->reserved = 0;
 	sec4_sg_ptr->buf_pool_id = 0;
 	sec4_sg_ptr->offset = offset;
 #ifdef DEBUG
@@ -106,9 +105,15 @@ static inline void dma_unmap_sg_chained(
 {
 	if (unlikely(chained)) {
 		int i;
+		struct scatterlist *tsg = sg;
+
+		/*
+		 * Use a local copy of the sg pointer to avoid moving the
+		 * head of the list pointed to by sg as we walk the list.
+		 */
 		for (i = 0; i < nents; i++) {
-			dma_unmap_sg(dev, sg, 1, dir);
-			sg = sg_next(sg);
+			dma_unmap_sg(dev, tsg, 1, dir);
+			tsg = sg_next(tsg);
 		}
 	} else if (nents) {
 		dma_unmap_sg(dev, sg, nents, dir);
@@ -119,19 +124,23 @@ static inline int dma_map_sg_chained(
 	struct device *dev, struct scatterlist *sg, unsigned int nents,
 	enum dma_data_direction dir, bool chained)
 {
-	struct scatterlist *first = sg;
-
 	if (unlikely(chained)) {
 		int i;
+		struct scatterlist *tsg = sg;
+
+		/*
+		 * Use a local copy of the sg pointer to avoid moving the
+		 * head of the list pointed to by sg as we walk the list.
+		 */
 		for (i = 0; i < nents; i++) {
-			if (!dma_map_sg(dev, sg, 1, dir)) {
-				dma_unmap_sg_chained(dev, first, i, dir,
+			if (!dma_map_sg(dev, tsg, 1, dir)) {
+				dma_unmap_sg_chained(dev, sg, i, dir,
 						     chained);
 				nents = 0;
 				break;
 			}
 
-			sg = sg_next(sg);
+			tsg = sg_next(tsg);
 		}
 	} else
 		nents = dma_map_sg(dev, sg, nents, dir);

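Both helpers get the same fix: advance a local cursor instead of the caller's sg pointer, so the head of the list is still available for cleanup (here, for the unmap call on the error path). The pattern in isolation, as a sketch:

#include <linux/scatterlist.h>

static void walk_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *cur = sg;	/* local copy preserves the head */
	int i;

	for (i = 0; i < nents && cur; i++) {
		/* operate on cur here, e.g. sg_dma_address(cur) */
		cur = sg_next(cur);
	}
	/* sg still points at the first entry */
}
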
+ 2 - 0
drivers/crypto/ccp/ccp-platform.c

@@ -216,6 +216,7 @@ static const struct acpi_device_id ccp_acpi_match[] = {
 	{ "AMDI0C00", 0 },
 	{ "AMDI0C00", 0 },
 	{ },
 	{ },
 };
 };
+MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
 #endif
 
 #ifdef CONFIG_OF
@@ -223,6 +224,7 @@ static const struct of_device_id ccp_of_match[] = {
 	{ .compatible = "amd,ccp-seattle-v1a" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, ccp_of_match);
 #endif
 
 static struct platform_driver ccp_platform_driver = {

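MODULE_DEVICE_TABLE() exports a match table as module alias information, which is what lets udev/modprobe autoload the module when a matching ACPI or OF device turns up; without it, the tables above only matched for built-in or hand-loaded drivers. Minimal OF-side usage (the compatible string is illustrative):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-dev" },	/* illustrative */
	{ },
};
MODULE_DEVICE_TABLE(of, example_of_match);	/* emits modalias entries */
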
+ 1 - 1
drivers/crypto/img-hash.c

@@ -334,7 +334,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
 
 	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
 	if (!hdev->dma_lch) {
-		dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n");
+		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
 		return -EBUSY;
 	}
 	dma_conf.direction = DMA_MEM_TO_DEV;

+ 157 - 155
drivers/crypto/ixp4xx_crypto.c

@@ -156,7 +156,8 @@ struct ablk_ctx {
 };
 
 struct aead_ctx {
-	struct buffer_desc *buffer;
+	struct buffer_desc *src;
+	struct buffer_desc *dst;
 	struct scatterlist ivlist;
 	/* used when the hmac is not on one sg entry */
 	u8 *hmac_virt;
@@ -198,6 +199,15 @@ struct ixp_alg {
 	int registered;
 };
 
+struct ixp_aead_alg {
+	struct aead_alg crypto;
+	const struct ix_hash_algo *hash;
+	u32 cfg_enc;
+	u32 cfg_dec;
+
+	int registered;
+};
+
 static const struct ix_hash_algo hash_alg_md5 = {
 	.cfgword	= 0xAA010004,
 	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
@@ -339,11 +349,11 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
 	struct aead_ctx *req_ctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	int authsize = crypto_aead_authsize(tfm);
-	int decryptlen = req->cryptlen - authsize;
+	int decryptlen = req->assoclen + req->cryptlen - authsize;
 
 	if (req_ctx->encrypt) {
 		scatterwalk_map_and_copy(req_ctx->hmac_virt,
-			req->src, decryptlen, authsize, 1);
+			req->dst, decryptlen, authsize, 1);
 	}
 	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
 }
@@ -364,7 +374,8 @@ static void one_packet(dma_addr_t phys)
 		struct aead_request *req = crypt->data.aead_req;
 		struct aead_ctx *req_ctx = aead_request_ctx(req);
 
-		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 		if (req_ctx->hmac_virt) {
 			finish_scattered_hmac(crypt);
 		}
@@ -573,11 +584,10 @@ static int init_tfm_ablk(struct crypto_tfm *tfm)
 	return init_tfm(tfm);
 }
 
-static int init_tfm_aead(struct crypto_tfm *tfm)
+static int init_tfm_aead(struct crypto_aead *tfm)
 {
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct aead_ctx));
-	return init_tfm(tfm);
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+	return init_tfm(crypto_aead_tfm(tfm));
 }
 
 static void exit_tfm(struct crypto_tfm *tfm)
@@ -587,6 +597,11 @@ static void exit_tfm(struct crypto_tfm *tfm)
 	free_sa_dir(&ctx->decrypt);
 }
 
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+	exit_tfm(crypto_aead_tfm(tfm));
+}
+
 static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
 		int init_len, u32 ctx_addr, const u8 *key, int key_len)
 {
@@ -969,24 +984,6 @@ static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
 	return ret;
 }
 
-static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
-		unsigned int nbytes)
-{
-	int offset = 0;
-
-	if (!nbytes)
-		return 0;
-
-	for (;;) {
-		if (start < offset + sg->length)
-			break;
-
-		offset += sg->length;
-		sg = sg_next(sg);
-	}
-	return (start + nbytes > offset + sg->length);
-}
-
 static int aead_perform(struct aead_request *req, int encrypt,
 		int cryptoffset, int eff_cryptlen, u8 *iv)
 {
@@ -1002,6 +999,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	struct device *dev = &pdev->dev;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
+	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+	unsigned int lastlen;
 
 	if (qmgr_stat_full(SEND_QID))
 		return -EAGAIN;
@@ -1030,35 +1029,55 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	crypt->crypt_len = eff_cryptlen;
 
 	crypt->auth_offs = 0;
-	crypt->auth_len = req->assoclen + ivsize + cryptlen;
+	crypt->auth_len = req->assoclen + cryptlen;
 	BUG_ON(ivsize && !req->iv);
 	memcpy(crypt->iv, req->iv, ivsize);
 
+	req_ctx->dst = NULL;
+
 	if (req->src != req->dst) {
-		BUG(); /* -ENOTSUP because of my laziness */
+		struct buffer_desc dst_hook;
+
+		crypt->mode |= NPE_OP_NOT_IN_PLACE;
+		src_direction = DMA_TO_DEVICE;
+
+		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+				      &dst_hook, flags, DMA_FROM_DEVICE);
+		req_ctx->dst = dst_hook.next;
+		crypt->dst_buf = dst_hook.phys_next;
+
+		if (!buf)
+			goto free_buf_dst;
+
+		if (encrypt) {
+			lastlen = buf->buf_len;
+			if (lastlen >= authsize)
+				crypt->icv_rev_aes = buf->phys_addr +
+						     buf->buf_len - authsize;
+		}
 	}
 
-	/* ASSOC data */
-	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
-		flags, DMA_TO_DEVICE);
-	req_ctx->buffer = src_hook.next;
+	buf = chainup_buffers(dev, req->src, crypt->auth_len,
+			      &src_hook, flags, src_direction);
+	req_ctx->src = src_hook.next;
 	crypt->src_buf = src_hook.phys_next;
 	if (!buf)
-		goto out;
-	/* IV */
-	sg_init_table(&req_ctx->ivlist, 1);
-	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
-	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
-			DMA_BIDIRECTIONAL);
-	if (!buf)
-		goto free_chain;
-	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+		goto free_buf_src;
+
+	if (!encrypt || !req_ctx->dst) {
+		lastlen = buf->buf_len;
+		if (lastlen >= authsize)
+			crypt->icv_rev_aes = buf->phys_addr +
+					     buf->buf_len - authsize;
+	}
+
+	if (unlikely(lastlen < authsize)) {
 		/* The 12 hmac bytes are scattered,
 		 * we need to copy them into a safe buffer */
 		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
 				&crypt->icv_rev_aes);
 		if (unlikely(!req_ctx->hmac_virt))
-			goto free_chain;
+			goto free_buf_src;
 		if (!encrypt) {
 			scatterwalk_map_and_copy(req_ctx->hmac_virt,
 				req->src, cryptlen, authsize, 0);
@@ -1067,27 +1086,16 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	} else {
 		req_ctx->hmac_virt = NULL;
 	}
-	/* Crypt */
-	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
-			DMA_BIDIRECTIONAL);
-	if (!buf)
-		goto free_hmac_virt;
-	if (!req_ctx->hmac_virt) {
-		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
-	}
 
 	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;
-free_hmac_virt:
-	if (req_ctx->hmac_virt) {
-		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
-				crypt->icv_rev_aes);
-	}
-free_chain:
-	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
-out:
+
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dst:
+	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
 	return -ENOMEM;
 }
@@ -1173,40 +1181,12 @@ badkey:
 
 static int aead_encrypt(struct aead_request *req)
 {
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-	return aead_perform(req, 1, req->assoclen + ivsize,
-			req->cryptlen, req->iv);
+	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
 }
 
 static int aead_decrypt(struct aead_request *req)
 {
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-	return aead_perform(req, 0, req->assoclen + ivsize,
-			req->cryptlen, req->iv);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
-	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-	unsigned len, ivsize = crypto_aead_ivsize(tfm);
-	__be64 seq;
-
-	/* copied from eseqiv.c */
-	if (!ctx->salted) {
-		get_random_bytes(ctx->salt, ivsize);
-		ctx->salted = 1;
-	}
-	memcpy(req->areq.iv, ctx->salt, ivsize);
-	len = ivsize;
-	if (ivsize > sizeof(u64)) {
-		memset(req->giv, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + ivsize - len, &seq, len);
-	return aead_perform(&req->areq, 1, req->areq.assoclen,
-			req->areq.cryptlen +ivsize, req->giv);
+	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
 }
 
 static struct ixp_alg ixp4xx_algos[] = {
@@ -1319,80 +1299,77 @@ static struct ixp_alg ixp4xx_algos[] = {
 	},
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
 	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-}, {
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(des))",
-		.cra_blocksize	= DES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(des))",
+			.cra_blocksize	= DES_BLOCK_SIZE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
-		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES3_EDE_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(des))",
-		.cra_blocksize	= DES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des))",
+			.cra_blocksize	= DES_BLOCK_SIZE,
+		},
 			.ivsize		= DES_BLOCK_SIZE,
 			.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
-		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES3_EDE_BLOCK_SIZE,
-			.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(aes))",
-		.cra_blocksize	= AES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= AES_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(aes))",
+			.cra_blocksize	= AES_BLOCK_SIZE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
 	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
-		.cra_blocksize	= AES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= AES_BLOCK_SIZE,
-			.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
+			.cra_blocksize	= AES_BLOCK_SIZE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
@@ -1436,32 +1413,20 @@ static int __init ixp_module_init(void)
 		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
 			continue;
 		}
-		if (!ixp4xx_algos[i].hash) {
-			/* block ciphers */
-			cra->cra_type = &crypto_ablkcipher_type;
-			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-					 CRYPTO_ALG_KERN_DRIVER_ONLY |
-					 CRYPTO_ALG_ASYNC;
-			if (!cra->cra_ablkcipher.setkey)
-				cra->cra_ablkcipher.setkey = ablk_setkey;
-			if (!cra->cra_ablkcipher.encrypt)
-				cra->cra_ablkcipher.encrypt = ablk_encrypt;
-			if (!cra->cra_ablkcipher.decrypt)
-				cra->cra_ablkcipher.decrypt = ablk_decrypt;
-			cra->cra_init = init_tfm_ablk;
-		} else {
-			/* authenc */
-			cra->cra_type = &crypto_aead_type;
-			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					 CRYPTO_ALG_KERN_DRIVER_ONLY |
-					 CRYPTO_ALG_ASYNC;
-			cra->cra_aead.setkey = aead_setkey;
-			cra->cra_aead.setauthsize = aead_setauthsize;
-			cra->cra_aead.encrypt = aead_encrypt;
-			cra->cra_aead.decrypt = aead_decrypt;
-			cra->cra_aead.givencrypt = aead_givencrypt;
-			cra->cra_init = init_tfm_aead;
-		}
+
+		/* block ciphers */
+		cra->cra_type = &crypto_ablkcipher_type;
+		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				 CRYPTO_ALG_KERN_DRIVER_ONLY |
+				 CRYPTO_ALG_ASYNC;
+		if (!cra->cra_ablkcipher.setkey)
+			cra->cra_ablkcipher.setkey = ablk_setkey;
+		if (!cra->cra_ablkcipher.encrypt)
+			cra->cra_ablkcipher.encrypt = ablk_encrypt;
+		if (!cra->cra_ablkcipher.decrypt)
+			cra->cra_ablkcipher.decrypt = ablk_decrypt;
+		cra->cra_init = init_tfm_ablk;
+
 		cra->cra_ctxsize = sizeof(struct ixp_ctx);
 		cra->cra_module = THIS_MODULE;
 		cra->cra_alignmask = 3;
@@ -1473,6 +1438,38 @@ static int __init ixp_module_init(void)
 		else
 			ixp4xx_algos[i].registered = 1;
 	}
+
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
+		    CRYPTO_MAX_ALG_NAME)
+			continue;
+		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+			continue;
+
+		/* authenc */
+		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+				      CRYPTO_ALG_ASYNC;
+		cra->setkey = aead_setkey;
+		cra->setauthsize = aead_setauthsize;
+		cra->encrypt = aead_encrypt;
+		cra->decrypt = aead_decrypt;
+		cra->init = init_tfm_aead;
+		cra->exit = exit_tfm_aead;
+
+		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+		cra->base.cra_module = THIS_MODULE;
+		cra->base.cra_alignmask = 3;
+		cra->base.cra_priority = 300;
+
+		if (crypto_register_aead(cra))
+			printk(KERN_ERR "Failed to register '%s'\n",
+				cra->base.cra_driver_name);
+		else
+			ixp4xx_aeads[i].registered = 1;
+	}
 	return 0;
 }
 
@@ -1481,6 +1478,11 @@ static void __exit ixp_module_exit(void)
 	int num = ARRAY_SIZE(ixp4xx_algos);
 	int i;
 
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+		if (ixp4xx_aeads[i].registered)
+			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+	}
+
 	for (i=0; i< num; i++) {
 		if (ixp4xx_algos[i].registered)
 			crypto_unregister_alg(&ixp4xx_algos[i].crypto);

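This conversion is the AEAD-interface transition in miniature: the authenc entries move out of crypto_alg's cra_u.aead union into standalone struct aead_alg objects registered with crypto_register_aead(). A stripped-down sketch of the new registration shape, with stub callbacks standing in for real driver entry points (everything named "example" is illustrative):

#include <crypto/internal/aead.h>
#include <linux/module.h>

static int example_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	return 0;	/* a real driver programs its key material here */
}

static int example_crypt(struct aead_request *req)
{
	return -ENOSYS;	/* a real driver queues the request to hardware */
}

static struct aead_alg example_aead = {
	.base = {
		.cra_name	 = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "authenc-example",	/* illustrative */
		.cra_priority	 = 100,
		.cra_blocksize	 = 16,			/* AES block size */
		.cra_module	 = THIS_MODULE,
	},
	.ivsize		= 16,
	.maxauthsize	= 20,	/* SHA-1 digest size */
	.setkey		= example_setkey,
	.encrypt	= example_crypt,
	.decrypt	= example_crypt,
};

static int __init example_init(void)
{
	return crypto_register_aead(&example_aead);
}

static void __exit example_exit(void)
{
	crypto_unregister_aead(&example_aead);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
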
+ 0 - 1
drivers/crypto/marvell/cesa.c

@@ -533,7 +533,6 @@ static struct platform_driver marvell_cesa = {
 	.probe		= mv_cesa_probe,
 	.remove		= mv_cesa_remove,
 	.driver		= {
-		.owner	= THIS_MODULE,
 		.name	= "marvell-cesa",
 		.name	= "marvell-cesa",
 		.of_match_table = mv_cesa_of_match_table,
 		.of_match_table = mv_cesa_of_match_table,
 	},
 	},

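Dropping .owner is safe because platform_driver_register() is a macro that passes THIS_MODULE to __platform_driver_register() on the caller's behalf, so the field was being set twice. The minimal modern form (names and probe stub illustrative):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* illustrative stub */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name = "example",	/* no .owner: the core fills it in */
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");
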
+ 5 - 12
drivers/crypto/nx/Kconfig

@@ -14,11 +14,14 @@ config CRYPTO_DEV_NX_ENCRYPT
 config CRYPTO_DEV_NX_COMPRESS
 	tristate "Compression acceleration support"
 	default y
+	select CRYPTO_ALGAPI
+	select 842_DECOMPRESS
 	help
 	  Support for PowerPC Nest (NX) compression acceleration. This
 	  module supports acceleration for compressing memory with the 842
-	  algorithm.  One of the platform drivers must be selected also.
-	  If you choose 'M' here, this module will be called nx_compress.
+	  algorithm using the cryptographic API.  One of the platform
+	  drivers must be selected also.  If you choose 'M' here, this
+	  module will be called nx_compress.
 
 if CRYPTO_DEV_NX_COMPRESS
 
@@ -42,14 +45,4 @@ config CRYPTO_DEV_NX_COMPRESS_POWERNV
 	  algorithm.  This supports NX hardware on the PowerNV platform.
 	  If you choose 'M' here, this module will be called nx_compress_powernv.
 
-config CRYPTO_DEV_NX_COMPRESS_CRYPTO
-	tristate "Compression acceleration cryptographic interface"
-	select CRYPTO_ALGAPI
-	select 842_DECOMPRESS
-	default y
-	help
-	  Support for PowerPC Nest (NX) accelerators using the cryptographic
-	  API.  If you choose 'M' here, this module will be called
-	  nx_compress_crypto.
-
 endif

+ 2 - 6
drivers/crypto/nx/Makefile

@@ -10,12 +10,8 @@ nx-crypto-objs := nx.o \
 		  nx-sha256.o \
 		  nx-sha512.o
 
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
 nx-compress-objs := nx-842.o
-nx-compress-platform-objs := nx-842-platform.o
 nx-compress-pseries-objs := nx-842-pseries.o
 nx-compress-powernv-objs := nx-842-powernv.o
-nx-compress-crypto-objs := nx-842-crypto.o

+ 0 - 580
drivers/crypto/nx/nx-842-crypto.c

@@ -1,580 +0,0 @@
-/*
- * Cryptographic API for the NX-842 hardware compression.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) IBM Corporation, 2011-2015
- *
- * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
- *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
- *
- * Rewrite: Dan Streetman <ddstreet@ieee.org>
- *
- * This is an interface to the NX-842 compression hardware in PowerPC
- * processors.  Most of the complexity of this drvier is due to the fact that
- * the NX-842 compression hardware requires the input and output data buffers
- * to be specifically aligned, to be a specific multiple in length, and within
- * specific minimum and maximum lengths.  Those restrictions, provided by the
- * nx-842 driver via nx842_constraints, mean this driver must use bounce
- * buffers and headers to correct misaligned in or out buffers, and to split
- * input buffers that are too large.
- *
- * This driver will fall back to software decompression if the hardware
- * decompression fails, so this driver's decompression should never fail as
- * long as the provided compressed buffer is valid.  Any compressed buffer
- * created by this driver will have a header (except ones where the input
- * perfectly matches the constraints); so users of this driver cannot simply
- * pass a compressed buffer created by this driver over to the 842 software
- * decompression library.  Instead, users must use this driver to decompress;
- * if the hardware fails or is unavailable, the compressed buffer will be
- * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
- * software decompression library.
- *
- * This does not fall back to software compression, however, since the caller
- * of this function is specifically requesting hardware compression; if the
- * hardware compression fails, the caller can fall back to software
- * compression, and the raw 842 compressed buffer that the software compressor
- * creates can be passed to this driver for hardware decompression; any
- * buffer without our specific header magic is assumed to be a raw 842 buffer
- * and passed directly to the hardware.  Note that the software compression
- * library will produce a compressed buffer that is incompatible with the
- * hardware decompressor if the original input buffer length is not a multiple
- * of 8; if such a compressed buffer is passed to this driver for
- * decompression, the hardware will reject it and this driver will then pass
- * it over to the software library for decompression.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/sw842.h>
-#include <linux/ratelimit.h>
-
-#include "nx-842.h"
-
-/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
- * template (see lib/842/842.h), so this magic number will never appear at
- * the start of a raw 842 compressed buffer.  That is important, as any buffer
- * passed to us without this magic is assumed to be a raw 842 compressed
- * buffer, and passed directly to the hardware to decompress.
- */
-#define NX842_CRYPTO_MAGIC	(0xf842)
-#define NX842_CRYPTO_GROUP_MAX	(0x20)
-#define NX842_CRYPTO_HEADER_SIZE(g)				\
-	(sizeof(struct nx842_crypto_header) +			\
-	 sizeof(struct nx842_crypto_header_group) * (g))
-#define NX842_CRYPTO_HEADER_MAX_SIZE				\
-	NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
-
-/* bounce buffer size */
-#define BOUNCE_BUFFER_ORDER	(2)
-#define BOUNCE_BUFFER_SIZE					\
-	((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
-
-/* try longer on comp because we can fallback to sw decomp if hw is busy */
-#define COMP_BUSY_TIMEOUT	(250) /* ms */
-#define DECOMP_BUSY_TIMEOUT	(50) /* ms */
-
-struct nx842_crypto_header_group {
-	__be16 padding;			/* unused bytes at start of group */
-	__be32 compressed_length;	/* compressed bytes in group */
-	__be32 uncompressed_length;	/* bytes after decompression */
-} __packed;
-
-struct nx842_crypto_header {
-	__be16 magic;		/* NX842_CRYPTO_MAGIC */
-	__be16 ignore;		/* decompressed end bytes to ignore */
-	u8 groups;		/* total groups in this header */
-	struct nx842_crypto_header_group group[];
-} __packed;
-
-struct nx842_crypto_param {
-	u8 *in;
-	unsigned int iremain;
-	u8 *out;
-	unsigned int oremain;
-	unsigned int ototal;
-};
-
-static int update_param(struct nx842_crypto_param *p,
-			unsigned int slen, unsigned int dlen)
-{
-	if (p->iremain < slen)
-		return -EOVERFLOW;
-	if (p->oremain < dlen)
-		return -ENOSPC;
-
-	p->in += slen;
-	p->iremain -= slen;
-	p->out += dlen;
-	p->oremain -= dlen;
-	p->ototal += dlen;
-
-	return 0;
-}
-
-struct nx842_crypto_ctx {
-	u8 *wmem;
-	u8 *sbounce, *dbounce;
-
-	struct nx842_crypto_header header;
-	struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
-};
-
-static int nx842_crypto_init(struct crypto_tfm *tfm)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
-	ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
-	ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
-	if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
-		kfree(ctx->wmem);
-		free_page((unsigned long)ctx->sbounce);
-		free_page((unsigned long)ctx->dbounce);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void nx842_crypto_exit(struct crypto_tfm *tfm)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	kfree(ctx->wmem);
-	free_page((unsigned long)ctx->sbounce);
-	free_page((unsigned long)ctx->dbounce);
-}
-
-static int read_constraints(struct nx842_constraints *c)
-{
-	int ret;
-
-	ret = nx842_constraints(c);
-	if (ret) {
-		pr_err_ratelimited("could not get nx842 constraints : %d\n",
-				   ret);
-		return ret;
-	}
-
-	/* limit maximum, to always have enough bounce buffer to decompress */
-	if (c->maximum > BOUNCE_BUFFER_SIZE) {
-		c->maximum = BOUNCE_BUFFER_SIZE;
-		pr_info_once("limiting nx842 maximum to %x\n", c->maximum);
-	}
-
-	return 0;
-}
-
-static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
-{
-	int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-
-	/* compress should have added space for header */
-	if (s > be16_to_cpu(hdr->group[0].padding)) {
-		pr_err("Internal error: no space for header\n");
-		return -EINVAL;
-	}
-
-	memcpy(buf, hdr, s);
-
-	print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
-
-	return 0;
-}
-
-static int compress(struct nx842_crypto_ctx *ctx,
-		    struct nx842_crypto_param *p,
-		    struct nx842_crypto_header_group *g,
-		    struct nx842_constraints *c,
-		    u16 *ignore,
-		    unsigned int hdrsize)
-{
-	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
-	unsigned int adj_slen = slen;
-	u8 *src = p->in, *dst = p->out;
-	int ret, dskip = 0;
-	ktime_t timeout;
-
-	if (p->iremain == 0)
-		return -EOVERFLOW;
-
-	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
-		return -ENOSPC;
-
-	if (slen % c->multiple)
-		adj_slen = round_up(slen, c->multiple);
-	if (slen < c->minimum)
-		adj_slen = c->minimum;
-	if (slen > c->maximum)
-		adj_slen = slen = c->maximum;
-	if (adj_slen > slen || (u64)src % c->alignment) {
-		adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
-		slen = min(slen, BOUNCE_BUFFER_SIZE);
-		if (adj_slen > slen)
-			memset(ctx->sbounce + slen, 0, adj_slen - slen);
-		memcpy(ctx->sbounce, src, slen);
-		src = ctx->sbounce;
-		slen = adj_slen;
-		pr_debug("using comp sbounce buffer, len %x\n", slen);
-	}
-
-	dst += hdrsize;
-	dlen -= hdrsize;
-
-	if ((u64)dst % c->alignment) {
-		dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
-		dst += dskip;
-		dlen -= dskip;
-	}
-	if (dlen % c->multiple)
-		dlen = round_down(dlen, c->multiple);
-	if (dlen < c->minimum) {
-nospc:
-		dst = ctx->dbounce;
-		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
-		dlen = round_down(dlen, c->multiple);
-		dskip = 0;
-		pr_debug("using comp dbounce buffer, len %x\n", dlen);
-	}
-	if (dlen > c->maximum)
-		dlen = c->maximum;
-
-	tmplen = dlen;
-	timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
-	do {
-		dlen = tmplen; /* reset dlen, if we're retrying */
-		ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem);
-		/* possibly we should reduce the slen here, instead of
-		 * retrying with the dbounce buffer?
-		 */
-		if (ret == -ENOSPC && dst != ctx->dbounce)
-			goto nospc;
-	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
-	if (ret)
-		return ret;
-
-	dskip += hdrsize;
-
-	if (dst == ctx->dbounce)
-		memcpy(p->out + dskip, dst, dlen);
-
-	g->padding = cpu_to_be16(dskip);
-	g->compressed_length = cpu_to_be32(dlen);
-	g->uncompressed_length = cpu_to_be32(slen);
-
-	if (p->iremain < slen) {
-		*ignore = slen - p->iremain;
-		slen = p->iremain;
-	}
-
-	pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
-		 slen, *ignore, dlen, dskip);
-
-	return update_param(p, slen, dskip + dlen);
-}
-
-static int nx842_crypto_compress(struct crypto_tfm *tfm,
-				 const u8 *src, unsigned int slen,
-				 u8 *dst, unsigned int *dlen)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct nx842_crypto_header *hdr = &ctx->header;
-	struct nx842_crypto_param p;
-	struct nx842_constraints c;
-	unsigned int groups, hdrsize, h;
-	int ret, n;
-	bool add_header;
-	u16 ignore = 0;
-
-	p.in = (u8 *)src;
-	p.iremain = slen;
-	p.out = dst;
-	p.oremain = *dlen;
-	p.ototal = 0;
-
-	*dlen = 0;
-
-	ret = read_constraints(&c);
-	if (ret)
-		return ret;
-
-	groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
-		       DIV_ROUND_UP(p.iremain, c.maximum));
-	hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
-
-	/* skip adding header if the buffers meet all constraints */
-	add_header = (p.iremain % c.multiple	||
-		      p.iremain < c.minimum	||
-		      p.iremain > c.maximum	||
-		      (u64)p.in % c.alignment	||
-		      p.oremain % c.multiple	||
-		      p.oremain < c.minimum	||
-		      p.oremain > c.maximum	||
-		      (u64)p.out % c.alignment);
-
-	hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
-	hdr->groups = 0;
-	hdr->ignore = 0;
-
-	while (p.iremain > 0) {
-		n = hdr->groups++;
-		if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
-			return -ENOSPC;
-
-		/* header goes before first group */
-		h = !n && add_header ? hdrsize : 0;
-
-		if (ignore)
-			pr_warn("interal error, ignore is set %x\n", ignore);
-
-		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
-		if (ret)
-			return ret;
-	}
-
-	if (!add_header && hdr->groups > 1) {
-		pr_err("Internal error: No header but multiple groups\n");
-		return -EINVAL;
-	}
-
-	/* ignore indicates the input stream needed to be padded */
-	hdr->ignore = cpu_to_be16(ignore);
-	if (ignore)
-		pr_debug("marked %d bytes as ignore\n", ignore);
-
-	if (add_header)
-		ret = nx842_crypto_add_header(hdr, dst);
-	if (ret)
-		return ret;
-
-	*dlen = p.ototal;
-
-	pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
-
-	return 0;
-}
-
-static int decompress(struct nx842_crypto_ctx *ctx,
-		      struct nx842_crypto_param *p,
-		      struct nx842_crypto_header_group *g,
-		      struct nx842_constraints *c,
-		      u16 ignore,
-		      bool usehw)
-{
-	unsigned int slen = be32_to_cpu(g->compressed_length);
-	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
-	unsigned int dlen = p->oremain, tmplen;
-	unsigned int adj_slen = slen;
-	u8 *src = p->in, *dst = p->out;
-	u16 padding = be16_to_cpu(g->padding);
-	int ret, spadding = 0, dpadding = 0;
-	ktime_t timeout;
-
-	if (!slen || !required_len)
-		return -EINVAL;
-
-	if (p->iremain <= 0 || padding + slen > p->iremain)
-		return -EOVERFLOW;
-
-	if (p->oremain <= 0 || required_len - ignore > p->oremain)
-		return -ENOSPC;
-
-	src += padding;
-
-	if (!usehw)
-		goto usesw;
-
-	if (slen % c->multiple)
-		adj_slen = round_up(slen, c->multiple);
-	if (slen < c->minimum)
-		adj_slen = c->minimum;
-	if (slen > c->maximum)
-		goto usesw;
-	if (slen < adj_slen || (u64)src % c->alignment) {
-		/* we can append padding bytes because the 842 format defines
-		 * an "end" template (see lib/842/842_decompress.c) and will
-		 * ignore any bytes following it.
-		 */
-		if (slen < adj_slen)
-			memset(ctx->sbounce + slen, 0, adj_slen - slen);
-		memcpy(ctx->sbounce, src, slen);
-		src = ctx->sbounce;
-		spadding = adj_slen - slen;
-		slen = adj_slen;
-		pr_debug("using decomp sbounce buffer, len %x\n", slen);
-	}
-
-	if (dlen % c->multiple)
-		dlen = round_down(dlen, c->multiple);
-	if (dlen < required_len || (u64)dst % c->alignment) {
-		dst = ctx->dbounce;
-		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
-		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
-	}
-	if (dlen < c->minimum)
-		goto usesw;
-	if (dlen > c->maximum)
-		dlen = c->maximum;
-
-	tmplen = dlen;
-	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
-	do {
-		dlen = tmplen; /* reset dlen, if we're retrying */
-		ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem);
-	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
-	if (ret) {
-usesw:
-		/* reset everything, sw doesn't have constraints */
-		src = p->in + padding;
-		slen = be32_to_cpu(g->compressed_length);
-		spadding = 0;
-		dst = p->out;
-		dlen = p->oremain;
-		dpadding = 0;
-		if (dlen < required_len) { /* have ignore bytes */
-			dst = ctx->dbounce;
-			dlen = BOUNCE_BUFFER_SIZE;
-		}
-		pr_info_ratelimited("using software 842 decompression\n");
-		ret = sw842_decompress(src, slen, dst, &dlen);
-	}
-	if (ret)
-		return ret;
-
-	slen -= spadding;
-
-	dlen -= ignore;
-	if (ignore)
-		pr_debug("ignoring last %x bytes\n", ignore);
-
-	if (dst == ctx->dbounce)
-		memcpy(p->out, dst, dlen);
-
-	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
-		 slen, padding, dlen, ignore);
-
-	return update_param(p, slen + padding, dlen);
-}
-
-static int nx842_crypto_decompress(struct crypto_tfm *tfm,
-				   const u8 *src, unsigned int slen,
-				   u8 *dst, unsigned int *dlen)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct nx842_crypto_header *hdr;
-	struct nx842_crypto_param p;
-	struct nx842_constraints c;
-	int n, ret, hdr_len;
-	u16 ignore = 0;
-	bool usehw = true;
-
-	p.in = (u8 *)src;
-	p.iremain = slen;
-	p.out = dst;
-	p.oremain = *dlen;
-	p.ototal = 0;
-
-	*dlen = 0;
-
-	if (read_constraints(&c))
-		usehw = false;
-
-	hdr = (struct nx842_crypto_header *)src;
-
-	/* If it doesn't start with our header magic number, assume it's a raw
-	 * 842 compressed buffer and pass it directly to the hardware driver
-	 */
-	if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
-		struct nx842_crypto_header_group g = {
-			.padding =		0,
-			.compressed_length =	cpu_to_be32(p.iremain),
-			.uncompressed_length =	cpu_to_be32(p.oremain),
-		};
-
-		ret = decompress(ctx, &p, &g, &c, 0, usehw);
-		if (ret)
-			return ret;
-
-		*dlen = p.ototal;
-
-		return 0;
-	}
-
-	if (!hdr->groups) {
-		pr_err("header has no groups\n");
-		return -EINVAL;
-	}
-	if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
-		pr_err("header has too many groups %x, max %x\n",
-		       hdr->groups, NX842_CRYPTO_GROUP_MAX);
-		return -EINVAL;
-	}
-
-	hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-	if (hdr_len > slen)
-		return -EOVERFLOW;
-
-	memcpy(&ctx->header, src, hdr_len);
-	hdr = &ctx->header;
-
-	for (n = 0; n < hdr->groups; n++) {
-		/* ignore applies to last group */
-		if (n + 1 == hdr->groups)
-			ignore = be16_to_cpu(hdr->ignore);
-
-		ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw);
-		if (ret)
-			return ret;
-	}
-
-	*dlen = p.ototal;
-
-	pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
-
-	return 0;
-}
-
-static struct crypto_alg alg = {
-	.cra_name		= "842",
-	.cra_driver_name	= "842-nx",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_init		= nx842_crypto_init,
-	.cra_exit		= nx842_crypto_exit,
-	.cra_u			= { .compress = {
-	.coa_compress		= nx842_crypto_compress,
-	.coa_decompress		= nx842_crypto_decompress } }
-};
-
-static int __init nx842_crypto_mod_init(void)
-{
-	return crypto_register_alg(&alg);
-}
-module_init(nx842_crypto_mod_init);
-
-static void __exit nx842_crypto_mod_exit(void)
-{
-	crypto_unregister_alg(&alg);
-}
-module_exit(nx842_crypto_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface");
-MODULE_ALIAS_CRYPTO("842");
-MODULE_ALIAS_CRYPTO("842-nx");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");

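Worth noting before this file disappears: its decompressor told wrapped buffers apart from raw 842 streams purely by the 16-bit magic, whose top five bits (0x1f) form an invalid 842 template and so can never begin a raw stream. That dispatch, condensed from the removed code into standalone C:

#include <stdint.h>

#define NX842_CRYPTO_MAGIC	0xf842

static inline uint16_t be16_val(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);	/* big-endian magic field */
}

/* Anything not starting with the magic is treated as a raw 842 stream
 * and handed straight to the hardware (or software) decompressor. */
static int is_wrapped_buffer(const uint8_t *src, unsigned int slen)
{
	return slen >= 2 && be16_val(src) == NX842_CRYPTO_MAGIC;
}
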
+ 0 - 84
drivers/crypto/nx/nx-842-platform.c

@@ -1,84 +0,0 @@
-
-#include "nx-842.h"
-
-/* this is needed, separate from the main nx-842.c driver, because that main
- * driver loads the platform drivers during its init(), and it expects one
- * (or none) of the platform drivers to set this pointer to its driver.
- * That means this pointer can't be in the main nx-842 driver, because it
- * wouldn't be accessible until after the main driver loaded, which wouldn't
- * be possible as it's waiting for the platform driver to load.  So place it
- * here.
- */
-static struct nx842_driver *driver;
-static DEFINE_SPINLOCK(driver_lock);
-
-struct nx842_driver *nx842_platform_driver(void)
-{
-	return driver;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver);
-
-bool nx842_platform_driver_set(struct nx842_driver *_driver)
-{
-	bool ret = false;
-
-	spin_lock(&driver_lock);
-
-	if (!driver) {
-		driver = _driver;
-		ret = true;
-	} else
-		WARN(1, "can't set platform driver, already set to %s\n",
-		     driver->name);
-
-	spin_unlock(&driver_lock);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_set);
-
-/* only call this from the platform driver exit function */
-void nx842_platform_driver_unset(struct nx842_driver *_driver)
-{
-	spin_lock(&driver_lock);
-
-	if (driver == _driver)
-		driver = NULL;
-	else if (driver)
-		WARN(1, "can't unset platform driver %s, currently set to %s\n",
-		     _driver->name, driver->name);
-	else
-		WARN(1, "can't unset platform driver, already unset\n");
-
-	spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_unset);
-
-bool nx842_platform_driver_get(void)
-{
-	bool ret = false;
-
-	spin_lock(&driver_lock);
-
-	if (driver)
-		ret = try_module_get(driver->owner);
-
-	spin_unlock(&driver_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_get);
-
-void nx842_platform_driver_put(void)
-{
-	spin_lock(&driver_lock);
-
-	if (driver)
-		module_put(driver->owner);
-
-	spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_put);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression platform driver");

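With nx-842-platform.c gone, nothing consumes the nx842_platform_driver() accessors any more; callers reach the hardware through the crypto API instead, and each platform driver registers its own "842"/"842-nx" algorithm (see the next two files). As a rough sketch, a kernel-side user of either provider would look like this (hypothetical caller, error handling trimmed; crypto_alloc_comp()/crypto_comp_compress() are the stock compression interface):

#include <linux/crypto.h>
#include <linux/err.h>

/* Hypothetical example: compress one buffer through the "842" algorithm.
 * The crypto core picks the highest-priority provider registered under
 * that name, so this works whether or not the NX hardware is present.
 */
static int example_842_compress(const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	tfm = crypto_alloc_comp("842", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return ret;
}
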
+ 29 - 13
drivers/crypto/nx/nx-842-powernv.c

@@ -26,6 +26,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
 MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
 
 #define WORKMEM_ALIGN	(CRB_ALIGN)
 #define CSB_WAIT_MAX	(5000) /* ms */
@@ -344,7 +346,8 @@ static int wait_for_csb(struct nx842_workmem *wmem,
 	}
 
 	/* successful completion */
-	pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count,
+	pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+			     be32_to_cpu(csb->count),
 			     (unsigned long)ktime_us_delta(now, start));
 
 	return 0;
@@ -581,9 +584,29 @@ static struct nx842_driver nx842_powernv_driver = {
 	.decompress =	nx842_powernv_decompress,
 };
 
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+	return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+	.cra_name		= "842",
+	.cra_driver_name	= "842-nx",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= nx842_powernv_crypto_init,
+	.cra_exit		= nx842_crypto_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= nx842_crypto_compress,
+	.coa_decompress		= nx842_crypto_decompress } }
+};
+
 static __init int nx842_powernv_init(void)
 {
 	struct device_node *dn;
+	int ret;
 
 	/* verify workmem size/align restrictions */
 	BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
@@ -594,17 +617,14 @@ static __init int nx842_powernv_init(void)
 	BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
 	BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
 
-	pr_info("loading\n");
-
 	for_each_compatible_node(dn, NULL, "ibm,power-nx")
 		nx842_powernv_probe(dn);
 
-	if (!nx842_ct) {
-		pr_err("no coprocessors found\n");
+	if (!nx842_ct)
 		return -ENODEV;
-	}
 
-	if (!nx842_platform_driver_set(&nx842_powernv_driver)) {
+	ret = crypto_register_alg(&nx842_powernv_alg);
+	if (ret) {
 		struct nx842_coproc *coproc, *n;
 
 		list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
@@ -612,11 +632,9 @@ static __init int nx842_powernv_init(void)
 			kfree(coproc);
 		}
 
-		return -EEXIST;
+		return ret;
 	}
 
-	pr_info("loaded\n");
-
 	return 0;
 }
 module_init(nx842_powernv_init);
@@ -625,13 +643,11 @@ static void __exit nx842_powernv_exit(void)
 {
 	struct nx842_coproc *coproc, *n;
 
-	nx842_platform_driver_unset(&nx842_powernv_driver);
+	crypto_unregister_alg(&nx842_powernv_alg);
 
 	list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
 		list_del(&coproc->list);
 		kfree(coproc);
 	}
-
-	pr_info("unloaded\n");
 }
 module_exit(nx842_powernv_exit);

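Note that the wait_for_csb() hunk above is an endianness fix, not a cosmetic one: the coprocessor writes CSB fields big-endian, so csb->count must go through be32_to_cpu() before being printed or used on a little-endian kernel. A minimal sketch of the pattern (field name hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: reading a big-endian, device-written field such as csb->count.
 * A raw load would come back byte-swapped on little-endian hosts. */
static inline u32 read_hw_count(const __be32 *hw_count)
{
	return be32_to_cpu(*hw_count);
}
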
+ 71 - 68
drivers/crypto/nx/nx-842-pseries.c

@@ -29,6 +29,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
 MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
 
 static struct nx842_constraints nx842_pseries_constraints = {
 	.alignment =	DDE_BUFFER_ALIGN,
@@ -99,11 +101,6 @@ struct nx842_workmem {
 #define NX842_HW_PAGE_SIZE	(4096)
 #define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))
 
-enum nx842_status {
-	UNAVAILABLE,
-	AVAILABLE
-};
-
 struct ibm_nx842_counters {
 	atomic64_t comp_complete;
 	atomic64_t comp_failed;
@@ -121,7 +118,6 @@ static struct nx842_devdata {
 	unsigned int max_sg_len;
 	unsigned int max_sync_size;
 	unsigned int max_sync_sg;
-	enum nx842_status status;
 } __rcu *devdata;
 static DEFINE_SPINLOCK(devdata_mutex);
 
@@ -230,9 +226,12 @@ static int nx842_validate_result(struct device *dev,
 	switch (csb->completion_code) {
 	case 0:	/* Completed without error */
 		break;
-	case 64: /* Target bytes > Source bytes during compression */
+	case 64: /* Compression ok, but output larger than input */
+		dev_dbg(dev, "%s: output size larger than input size\n",
+					__func__);
+		break;
 	case 13: /* Output buffer too small */
-		dev_dbg(dev, "%s: Compression output larger than input\n",
+		dev_dbg(dev, "%s: Out of space in output buffer\n",
 					__func__);
 		return -ENOSPC;
 	case 66: /* Input data contains an illegal template field */
@@ -537,41 +536,36 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
 		devdata->max_sync_size = 0;
 		devdata->max_sync_sg = 0;
 		devdata->max_sg_len = 0;
-		devdata->status = UNAVAILABLE;
 		return 0;
 	} else
 		return -ENOENT;
 }
 
 /**
- * nx842_OF_upd_status -- Update the device info from OF status prop
+ * nx842_OF_upd_status -- Check the device info from OF status prop
 *
 * The status property indicates if the accelerator is enabled.  If the
 * device is in the OF tree it indicates that the hardware is present.
 * The status field indicates if the device is enabled when the status
 * is 'okay'.  Otherwise the device driver will be disabled.
 *
- * @devdata - struct nx842_devdata to update
 * @prop - struct property point containing the maxsyncop for the update
 *
 * Returns:
 *  0 - Device is available
- *  -EINVAL - Device is not available
+ *  -ENODEV - Device is not available
 */
-static int nx842_OF_upd_status(struct nx842_devdata *devdata,
-					struct property *prop) {
-	int ret = 0;
+static int nx842_OF_upd_status(struct property *prop)
+{
 	const char *status = (const char *)prop->value;
 
-	if (!strncmp(status, "okay", (size_t)prop->length)) {
-		devdata->status = AVAILABLE;
-	} else {
-		dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
-				__func__, status);
-		devdata->status = UNAVAILABLE;
-	}
+	if (!strncmp(status, "okay", (size_t)prop->length))
+		return 0;
+	if (!strncmp(status, "disabled", (size_t)prop->length))
+		return -ENODEV;
+	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
 
-	return ret;
+	return -EINVAL;
 }
 
 /**
@@ -735,6 +729,10 @@ static int nx842_OF_upd(struct property *new_prop)
 	int ret = 0;
 	unsigned long flags;
 
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -744,16 +742,10 @@
 	if (!old_devdata || !of_node) {
 		pr_err("%s: device is not available\n", __func__);
 		spin_unlock_irqrestore(&devdata_mutex, flags);
+		kfree(new_devdata);
 		return -ENODEV;
 	}
 
-	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
-	if (!new_devdata) {
-		dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
-		ret = -ENOMEM;
-		goto error_out;
-	}
-
 	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
 	new_devdata->counters = old_devdata->counters;
 
@@ -777,7 +769,7 @@
 		goto out;
 
 	/* Perform property updates */
-	ret = nx842_OF_upd_status(new_devdata, status);
+	ret = nx842_OF_upd_status(status);
 	if (ret)
 		goto error_out;
 
@@ -970,13 +962,43 @@
 	.decompress =	nx842_pseries_decompress,
 };
 
-static int __init nx842_probe(struct vio_dev *viodev,
-				  const struct vio_device_id *id)
+static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+{
+	return nx842_crypto_init(tfm, &nx842_pseries_driver);
+}
+
+static struct crypto_alg nx842_pseries_alg = {
+	.cra_name		= "842",
+	.cra_driver_name	= "842-nx",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= nx842_pseries_crypto_init,
+	.cra_exit		= nx842_crypto_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= nx842_crypto_compress,
+	.coa_decompress		= nx842_crypto_decompress } }
+};
+
+static int nx842_probe(struct vio_dev *viodev,
+		       const struct vio_device_id *id)
 {
 	struct nx842_devdata *old_devdata, *new_devdata = NULL;
 	unsigned long flags;
 	int ret = 0;
 
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata)
+		return -ENOMEM;
+
+	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+			GFP_NOFS);
+	if (!new_devdata->counters) {
+		kfree(new_devdata);
+		return -ENOMEM;
+	}
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -989,21 +1011,6 @@ static int __init nx842_probe(struct vio_dev *viodev,
 
 	dev_set_drvdata(&viodev->dev, NULL);
 
-	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
-	if (!new_devdata) {
-		dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
-		ret = -ENOMEM;
-		goto error_unlock;
-	}
-
-	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
-			GFP_NOFS);
-	if (!new_devdata->counters) {
-		dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
-		ret = -ENOMEM;
-		goto error_unlock;
-	}
-
 	new_devdata->vdev = viodev;
 	new_devdata->dev = &viodev->dev;
 	nx842_OF_set_defaults(new_devdata);
@@ -1016,9 +1023,12 @@
 	of_reconfig_notifier_register(&nx842_of_nb);
 
 	ret = nx842_OF_upd(NULL);
-	if (ret && ret != -ENODEV) {
-		dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
-		ret = -1;
+	if (ret)
+		goto error;
+
+	ret = crypto_register_alg(&nx842_pseries_alg);
+	if (ret) {
+		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
 		goto error;
 	}
 
@@ -1043,7 +1053,7 @@ error:
 	return ret;
 }
 
-static int __exit nx842_remove(struct vio_dev *viodev)
+static int nx842_remove(struct vio_dev *viodev)
 {
 	struct nx842_devdata *old_devdata;
 	unsigned long flags;
@@ -1051,6 +1061,8 @@ static int nx842_remove(struct vio_dev *viodev)
 	pr_info("Removing IBM Power 842 compression device\n");
 	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
 
+	crypto_unregister_alg(&nx842_pseries_alg);
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -1074,18 +1086,16 @@ static struct vio_device_id nx842_vio_driver_ids[] = {
 static struct vio_driver nx842_vio_driver = {
 	.name = KBUILD_MODNAME,
 	.probe = nx842_probe,
-	.remove = __exit_p(nx842_remove),
+	.remove = nx842_remove,
 	.get_desired_dma = nx842_get_desired_dma,
 	.id_table = nx842_vio_driver_ids,
 };
 
-static int __init nx842_init(void)
+static int __init nx842_pseries_init(void)
 {
 	struct nx842_devdata *new_devdata;
 	int ret;
 
-	pr_info("Registering IBM Power 842 compression driver\n");
-
 	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
 		return -ENODEV;
 
@@ -1095,7 +1105,6 @@
 		pr_err("Could not allocate memory for device data\n");
 		return -ENOMEM;
 	}
-	new_devdata->status = UNAVAILABLE;
 	RCU_INIT_POINTER(devdata, new_devdata);
 
 	ret = vio_register_driver(&nx842_vio_driver);
@@ -1106,24 +1115,18 @@
 		return ret;
 	}
 
-	if (!nx842_platform_driver_set(&nx842_pseries_driver)) {
-		vio_unregister_driver(&nx842_vio_driver);
-		kfree(new_devdata);
-		return -EEXIST;
-	}
-
 	return 0;
 }
 
-module_init(nx842_init);
+module_init(nx842_pseries_init);
 
-static void __exit nx842_exit(void)
+static void __exit nx842_pseries_exit(void)
 {
 	struct nx842_devdata *old_devdata;
 	unsigned long flags;
 
-	pr_info("Exiting IBM Power 842 compression driver\n");
-	nx842_platform_driver_unset(&nx842_pseries_driver);
+	crypto_unregister_alg(&nx842_pseries_alg);
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -1136,5 +1139,5 @@ static void __exit nx842_pseries_exit(void)
 	vio_unregister_driver(&nx842_vio_driver);
 }
 
-module_exit(nx842_exit);
+module_exit(nx842_pseries_exit);
 

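The reshuffling in nx842_OF_upd() and nx842_probe() above follows one rule: the GFP_NOFS allocations, which may sleep, now happen before spin_lock_irqsave(), so nothing can sleep inside the critical section, and the error path simply frees the unused allocation. A minimal sketch of that allocate-before-lock pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_state { int val; };			/* hypothetical */
static DEFINE_SPINLOCK(state_lock);		/* hypothetical */
static struct my_state *shared_state;		/* hypothetical */

static int update_shared_state(int val)
{
	struct my_state *new;
	unsigned long flags;

	/* may sleep: must run before taking the spinlock */
	new = kzalloc(sizeof(*new), GFP_NOFS);
	if (!new)
		return -ENOMEM;
	new->val = val;

	spin_lock_irqsave(&state_lock, flags);
	kfree(shared_state);	/* kfree() is safe under a spinlock */
	shared_state = new;
	spin_unlock_irqrestore(&state_lock, flags);
	return 0;
}
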
+ 491 - 63
drivers/crypto/nx/nx-842.c

@@ -1,10 +1,5 @@
 /*
- * Driver frontend for IBM Power 842 compression accelerator
- *
- * Copyright (C) 2015 Dan Streetman, IBM Corp
- *
- * Designer of the Power data compression engine:
- *   Bulent Abali <abali@us.ibm.com>
+ * Cryptographic API for the NX-842 hardware compression.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -15,89 +10,522 @@
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Designer of the Power data compression engine:
+ *   Bulent Abali <abali@us.ibm.com>
+ *
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors.  Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and within
+ * specific minimum and maximum lengths.  Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned in or out buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid.  Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library.  Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
+ *
+ * This does not fall back to software compression, however, since the caller
+ * of this function is specifically requesting hardware compression; if the
+ * hardware compression fails, the caller can fall back to software
+ * compression, and the raw 842 compressed buffer that the software compressor
+ * creates can be passed to this driver for hardware decompression; any
+ * buffer without our specific header magic is assumed to be a raw 842 buffer
+ * and passed directly to the hardware.  Note that the software compression
+ * library will produce a compressed buffer that is incompatible with the
+ * hardware decompressor if the original input buffer length is not a multiple
+ * of 8; if such a compressed buffer is passed to this driver for
+ * decompression, the hardware will reject it and this driver will then pass
+ * it over to the software library for decompression.
 */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include "nx-842.h"
+#include <linux/vmalloc.h>
+#include <linux/sw842.h>
+#include <linux/spinlock.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+#include "nx-842.h"
 
-/**
- * nx842_constraints
- *
- * This provides the driver's constraints.  Different nx842 implementations
- * may have varying requirements.  The constraints are:
- *   @alignment:	All buffers should be aligned to this
- *   @multiple:		All buffer lengths should be a multiple of this
- *   @minimum:		Buffer lengths must not be less than this amount
- *   @maximum:		Buffer lengths must not be more than this amount
- *
- * The constraints apply to all buffers and lengths, both input and output,
- * for both compression and decompression, except for the minimum which
- * only applies to compression input and decompression output; the
- * compressed data can be less than the minimum constraint.  It can be
- * assumed that compressed data will always adhere to the multiple
- * constraint.
- *
- * The driver may succeed even if these constraints are violated;
- * however the driver can return failure or suffer reduced performance
- * if any constraint is not met.
+/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
+ * template (see lib/842/842.h), so this magic number will never appear at
+ * the start of a raw 842 compressed buffer.  That is important, as any buffer
+ * passed to us without this magic is assumed to be a raw 842 compressed
+ * buffer, and passed directly to the hardware to decompress.
 */
-int nx842_constraints(struct nx842_constraints *c)
+#define NX842_CRYPTO_MAGIC	(0xf842)
+#define NX842_CRYPTO_HEADER_SIZE(g)				\
+	(sizeof(struct nx842_crypto_header) +			\
+	 sizeof(struct nx842_crypto_header_group) * (g))
+#define NX842_CRYPTO_HEADER_MAX_SIZE				\
+	NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
+
+/* bounce buffer size */
+#define BOUNCE_BUFFER_ORDER	(2)
+#define BOUNCE_BUFFER_SIZE					\
+	((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
+
+/* try longer on comp because we can fallback to sw decomp if hw is busy */
+#define COMP_BUSY_TIMEOUT	(250) /* ms */
+#define DECOMP_BUSY_TIMEOUT	(50) /* ms */
+
+struct nx842_crypto_param {
+	u8 *in;
+	unsigned int iremain;
+	u8 *out;
+	unsigned int oremain;
+	unsigned int ototal;
+};
+
+static int update_param(struct nx842_crypto_param *p,
+			unsigned int slen, unsigned int dlen)
 {
-	memcpy(c, nx842_platform_driver()->constraints, sizeof(*c));
+	if (p->iremain < slen)
+		return -EOVERFLOW;
+	if (p->oremain < dlen)
+		return -ENOSPC;
+
+	p->in += slen;
+	p->iremain -= slen;
+	p->out += dlen;
+	p->oremain -= dlen;
+	p->ototal += dlen;
+
 	return 0;
 }
-EXPORT_SYMBOL_GPL(nx842_constraints);
 
-/**
- * nx842_workmem_size
- *
- * Get the amount of working memory the driver requires.
- */
-size_t nx842_workmem_size(void)
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
 {
-	return nx842_platform_driver()->workmem_size;
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	spin_lock_init(&ctx->lock);
+	ctx->driver = driver;
+	ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+	ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+	ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+	if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
+		kfree(ctx->wmem);
+		free_page((unsigned long)ctx->sbounce);
+		free_page((unsigned long)ctx->dbounce);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(nx842_workmem_size);
+EXPORT_SYMBOL_GPL(nx842_crypto_init);
 
-int nx842_compress(const unsigned char *in, unsigned int ilen,
-		   unsigned char *out, unsigned int *olen, void *wmem)
+void nx842_crypto_exit(struct crypto_tfm *tfm)
 {
-	return nx842_platform_driver()->compress(in, ilen, out, olen, wmem);
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	kfree(ctx->wmem);
+	free_page((unsigned long)ctx->sbounce);
+	free_page((unsigned long)ctx->dbounce);
 }
-EXPORT_SYMBOL_GPL(nx842_compress);
+EXPORT_SYMBOL_GPL(nx842_crypto_exit);
 
-int nx842_decompress(const unsigned char *in, unsigned int ilen,
-		     unsigned char *out, unsigned int *olen, void *wmem)
+static void check_constraints(struct nx842_constraints *c)
 {
-	return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem);
+	/* limit maximum, to always have enough bounce buffer to decompress */
+	if (c->maximum > BOUNCE_BUFFER_SIZE)
+		c->maximum = BOUNCE_BUFFER_SIZE;
 }
-EXPORT_SYMBOL_GPL(nx842_decompress);
 
-static __init int nx842_init(void)
+static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
 {
-	request_module("nx-compress-powernv");
-	request_module("nx-compress-pseries");
+	int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
 
-	/* we prevent loading if there's no platform driver, and we get the
-	 * module that set it so it won't unload, so we don't need to check
-	 * if it's set in any of the above functions
-	 */
-	if (!nx842_platform_driver_get()) {
-		pr_err("no nx842 driver found.\n");
-		return -ENODEV;
+	/* compress should have added space for header */
+	if (s > be16_to_cpu(hdr->group[0].padding)) {
+		pr_err("Internal error: no space for header\n");
+		return -EINVAL;
 	}
 
+	memcpy(buf, hdr, s);
+
+	print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
+
 	return 0;
 }
-module_init(nx842_init);
 
-static void __exit nx842_exit(void)
+static int compress(struct nx842_crypto_ctx *ctx,
+		    struct nx842_crypto_param *p,
+		    struct nx842_crypto_header_group *g,
+		    struct nx842_constraints *c,
+		    u16 *ignore,
+		    unsigned int hdrsize)
+{
+	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
+	unsigned int adj_slen = slen;
+	u8 *src = p->in, *dst = p->out;
+	int ret, dskip = 0;
+	ktime_t timeout;
+
+	if (p->iremain == 0)
+		return -EOVERFLOW;
+
+	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
+		return -ENOSPC;
+
+	if (slen % c->multiple)
+		adj_slen = round_up(slen, c->multiple);
+	if (slen < c->minimum)
+		adj_slen = c->minimum;
+	if (slen > c->maximum)
+		adj_slen = slen = c->maximum;
+	if (adj_slen > slen || (u64)src % c->alignment) {
+		adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
+		slen = min(slen, BOUNCE_BUFFER_SIZE);
+		if (adj_slen > slen)
+			memset(ctx->sbounce + slen, 0, adj_slen - slen);
+		memcpy(ctx->sbounce, src, slen);
+		src = ctx->sbounce;
+		slen = adj_slen;
+		pr_debug("using comp sbounce buffer, len %x\n", slen);
+	}
+
+	dst += hdrsize;
+	dlen -= hdrsize;
+
+	if ((u64)dst % c->alignment) {
+		dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
+		dst += dskip;
+		dlen -= dskip;
+	}
+	if (dlen % c->multiple)
+		dlen = round_down(dlen, c->multiple);
+	if (dlen < c->minimum) {
+nospc:
+		dst = ctx->dbounce;
+		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
+		dlen = round_down(dlen, c->multiple);
+		dskip = 0;
+		pr_debug("using comp dbounce buffer, len %x\n", dlen);
+	}
+	if (dlen > c->maximum)
+		dlen = c->maximum;
+
+	tmplen = dlen;
+	timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
+	do {
+		dlen = tmplen; /* reset dlen, if we're retrying */
+		ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
+		/* possibly we should reduce the slen here, instead of
+		 * retrying with the dbounce buffer?
+		 */
+		if (ret == -ENOSPC && dst != ctx->dbounce)
+			goto nospc;
+	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+	if (ret)
+		return ret;
+
+	dskip += hdrsize;
+
+	if (dst == ctx->dbounce)
+		memcpy(p->out + dskip, dst, dlen);
+
+	g->padding = cpu_to_be16(dskip);
+	g->compressed_length = cpu_to_be32(dlen);
+	g->uncompressed_length = cpu_to_be32(slen);
+
+	if (p->iremain < slen) {
+		*ignore = slen - p->iremain;
+		slen = p->iremain;
+	}
+
+	pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
+		 slen, *ignore, dlen, dskip);
+
+	return update_param(p, slen, dskip + dlen);
+}
+
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+			  const u8 *src, unsigned int slen,
+			  u8 *dst, unsigned int *dlen)
+{
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct nx842_crypto_header *hdr = &ctx->header;
+	struct nx842_crypto_param p;
+	struct nx842_constraints c = *ctx->driver->constraints;
+	unsigned int groups, hdrsize, h;
+	int ret, n;
+	bool add_header;
+	u16 ignore = 0;
+
+	check_constraints(&c);
+
+	p.in = (u8 *)src;
+	p.iremain = slen;
+	p.out = dst;
+	p.oremain = *dlen;
+	p.ototal = 0;
+
+	*dlen = 0;
+
+	groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
+		       DIV_ROUND_UP(p.iremain, c.maximum));
+	hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
+
+	spin_lock_bh(&ctx->lock);
+
+	/* skip adding header if the buffers meet all constraints */
+	add_header = (p.iremain % c.multiple	||
+		      p.iremain < c.minimum	||
+		      p.iremain > c.maximum	||
+		      (u64)p.in % c.alignment	||
+		      p.oremain % c.multiple	||
+		      p.oremain < c.minimum	||
+		      p.oremain > c.maximum	||
+		      (u64)p.out % c.alignment);
+
+	hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
+	hdr->groups = 0;
+	hdr->ignore = 0;
+
+	while (p.iremain > 0) {
+		n = hdr->groups++;
+		ret = -ENOSPC;
+		if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
+			goto unlock;
+
+		/* header goes before first group */
+		h = !n && add_header ? hdrsize : 0;
+
+		if (ignore)
+			pr_warn("internal error, ignore is set %x\n", ignore);
+
+		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
+		if (ret)
+			goto unlock;
+	}
+
+	if (!add_header && hdr->groups > 1) {
+		pr_err("Internal error: No header but multiple groups\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/* ignore indicates the input stream needed to be padded */
+	hdr->ignore = cpu_to_be16(ignore);
+	if (ignore)
+		pr_debug("marked %d bytes as ignore\n", ignore);
+
+	if (add_header)
+		ret = nx842_crypto_add_header(hdr, dst);
+	if (ret)
+		goto unlock;
+
+	*dlen = p.ototal;
+
+	pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_compress);
+
+static int decompress(struct nx842_crypto_ctx *ctx,
+		      struct nx842_crypto_param *p,
+		      struct nx842_crypto_header_group *g,
+		      struct nx842_constraints *c,
+		      u16 ignore)
 {
-	nx842_platform_driver_put();
+	unsigned int slen = be32_to_cpu(g->compressed_length);
+	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
+	unsigned int dlen = p->oremain, tmplen;
+	unsigned int adj_slen = slen;
+	u8 *src = p->in, *dst = p->out;
+	u16 padding = be16_to_cpu(g->padding);
+	int ret, spadding = 0, dpadding = 0;
+	ktime_t timeout;
+
+	if (!slen || !required_len)
+		return -EINVAL;
+
+	if (p->iremain <= 0 || padding + slen > p->iremain)
+		return -EOVERFLOW;
+
+	if (p->oremain <= 0 || required_len - ignore > p->oremain)
+		return -ENOSPC;
+
+	src += padding;
+
+	if (slen % c->multiple)
+		adj_slen = round_up(slen, c->multiple);
+	if (slen < c->minimum)
+		adj_slen = c->minimum;
+	if (slen > c->maximum)
+		goto usesw;
+	if (slen < adj_slen || (u64)src % c->alignment) {
+		/* we can append padding bytes because the 842 format defines
+		 * an "end" template (see lib/842/842_decompress.c) and will
+		 * ignore any bytes following it.
+		 */
+		if (slen < adj_slen)
+			memset(ctx->sbounce + slen, 0, adj_slen - slen);
+		memcpy(ctx->sbounce, src, slen);
+		src = ctx->sbounce;
+		spadding = adj_slen - slen;
+		slen = adj_slen;
+		pr_debug("using decomp sbounce buffer, len %x\n", slen);
+	}
+
+	if (dlen % c->multiple)
+		dlen = round_down(dlen, c->multiple);
+	if (dlen < required_len || (u64)dst % c->alignment) {
+		dst = ctx->dbounce;
+		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
+		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
+	}
+	if (dlen < c->minimum)
+		goto usesw;
+	if (dlen > c->maximum)
+		dlen = c->maximum;
+
+	tmplen = dlen;
+	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
+	do {
+		dlen = tmplen; /* reset dlen, if we're retrying */
+		ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
+	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+	if (ret) {
+usesw:
+		/* reset everything, sw doesn't have constraints */
+		src = p->in + padding;
+		slen = be32_to_cpu(g->compressed_length);
+		spadding = 0;
+		dst = p->out;
+		dlen = p->oremain;
+		dpadding = 0;
+		if (dlen < required_len) { /* have ignore bytes */
+			dst = ctx->dbounce;
+			dlen = BOUNCE_BUFFER_SIZE;
+		}
+		pr_info_ratelimited("using software 842 decompression\n");
+		ret = sw842_decompress(src, slen, dst, &dlen);
+	}
+	if (ret)
+		return ret;
+
+	slen -= spadding;
+
+	dlen -= ignore;
+	if (ignore)
+		pr_debug("ignoring last %x bytes\n", ignore);
+
+	if (dst == ctx->dbounce)
+		memcpy(p->out, dst, dlen);
+
+	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
+		 slen, padding, dlen, ignore);
+
+	return update_param(p, slen + padding, dlen);
 }
-module_exit(nx842_exit);
+
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+			    const u8 *src, unsigned int slen,
+			    u8 *dst, unsigned int *dlen)
+{
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct nx842_crypto_header *hdr;
+	struct nx842_crypto_param p;
+	struct nx842_constraints c = *ctx->driver->constraints;
+	int n, ret, hdr_len;
+	u16 ignore = 0;
+
+	check_constraints(&c);
+
+	p.in = (u8 *)src;
+	p.iremain = slen;
+	p.out = dst;
+	p.oremain = *dlen;
+	p.ototal = 0;
+
+	*dlen = 0;
+
+	hdr = (struct nx842_crypto_header *)src;
+
+	spin_lock_bh(&ctx->lock);
+
+	/* If it doesn't start with our header magic number, assume it's a raw
+	 * 842 compressed buffer and pass it directly to the hardware driver
+	 */
+	if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
+		struct nx842_crypto_header_group g = {
+			.padding =		0,
+			.compressed_length =	cpu_to_be32(p.iremain),
+			.uncompressed_length =	cpu_to_be32(p.oremain),
+		};
+
+		ret = decompress(ctx, &p, &g, &c, 0);
+		if (ret)
+			goto unlock;
+
+		goto success;
+	}
+
+	if (!hdr->groups) {
+		pr_err("header has no groups\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
+		pr_err("header has too many groups %x, max %x\n",
+		       hdr->groups, NX842_CRYPTO_GROUP_MAX);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+	if (hdr_len > slen) {
+		ret = -EOVERFLOW;
+		goto unlock;
+	}
+
+	memcpy(&ctx->header, src, hdr_len);
+	hdr = &ctx->header;
+
+	for (n = 0; n < hdr->groups; n++) {
+		/* ignore applies to last group */
+		if (n + 1 == hdr->groups)
+			ignore = be16_to_cpu(hdr->ignore);
+
+		ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
+		if (ret)
+			goto unlock;
+	}
+
+success:
+	*dlen = p.ototal;
+
+	pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
+
+	ret = 0;
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_decompress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");

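To make the header handling concrete: decompression first tests the 16-bit magic, precisely because 0xf842 can never begin a raw 842 stream. A sketch of that test, using the types from nx-842.h (NX842_CRYPTO_MAGIC is private to nx-842.c, so it is repeated here; illustration only):

#include "nx-842.h"

#define NX842_CRYPTO_MAGIC	(0xf842)	/* as defined in nx-842.c */

/* Sketch: does this compressed buffer carry the nx-842 crypto header,
 * or is it a raw 842 stream to hand straight to the hardware? */
static bool has_nx842_header(const u8 *buf, unsigned int len)
{
	const struct nx842_crypto_header *hdr =
		(const struct nx842_crypto_header *)buf;

	/* too short to hold even the fixed part of the header */
	if (len < sizeof(*hdr))
		return false;

	return be16_to_cpu(hdr->magic) == NX842_CRYPTO_MAGIC;
}
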
+ 53 - 12
drivers/crypto/nx/nx-842.h

@@ -3,8 +3,9 @@
 #define __NX_842_H__
 
 #include <linux/kernel.h>
+#include <linux/init.h>
 #include <linux/module.h>
-#include <linux/sw842.h>
+#include <linux/crypto.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/io.h>
@@ -104,6 +105,25 @@ static inline unsigned long nx842_get_pa(void *addr)
 #define GET_FIELD(v, m)		(((v) & (m)) >> MASK_LSH(m))
 #define SET_FIELD(v, m, val)	(((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))
 
+/**
+ * This provides the driver's constraints.  Different nx842 implementations
+ * may have varying requirements.  The constraints are:
+ *   @alignment:	All buffers should be aligned to this
+ *   @multiple:		All buffer lengths should be a multiple of this
+ *   @minimum:		Buffer lengths must not be less than this amount
+ *   @maximum:		Buffer lengths must not be more than this amount
+ *
+ * The constraints apply to all buffers and lengths, both input and output,
+ * for both compression and decompression, except for the minimum which
+ * only applies to compression input and decompression output; the
+ * compressed data can be less than the minimum constraint.  It can be
+ * assumed that compressed data will always adhere to the multiple
+ * constraint.
+ *
+ * The driver may succeed even if these constraints are violated;
+ * however the driver can return failure or suffer reduced performance
+ * if any constraint is not met.
+ */
 struct nx842_constraints {
 	int alignment;
 	int multiple;
@@ -126,19 +146,40 @@ struct nx842_driver {
 			  void *wrkmem);
 };
 
-struct nx842_driver *nx842_platform_driver(void);
-bool nx842_platform_driver_set(struct nx842_driver *driver);
-void nx842_platform_driver_unset(struct nx842_driver *driver);
-bool nx842_platform_driver_get(void);
-void nx842_platform_driver_put(void);
+struct nx842_crypto_header_group {
+	__be16 padding;			/* unused bytes at start of group */
+	__be32 compressed_length;	/* compressed bytes in group */
+	__be32 uncompressed_length;	/* bytes after decompression */
+} __packed;
+
+struct nx842_crypto_header {
+	__be16 magic;		/* NX842_CRYPTO_MAGIC */
+	__be16 ignore;		/* decompressed end bytes to ignore */
+	u8 groups;		/* total groups in this header */
+	struct nx842_crypto_header_group group[];
+} __packed;
 
-size_t nx842_workmem_size(void);
+#define NX842_CRYPTO_GROUP_MAX	(0x20)
 
-int nx842_constraints(struct nx842_constraints *constraints);
+struct nx842_crypto_ctx {
+	spinlock_t lock;
+
+	u8 *wmem;
+	u8 *sbounce, *dbounce;
+
+	struct nx842_crypto_header header;
+	struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
+
+	struct nx842_driver *driver;
+};
 
-int nx842_compress(const unsigned char *in, unsigned int in_len,
-		   unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
-		     unsigned char *out, unsigned int *out_len, void *wrkmem);
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
+void nx842_crypto_exit(struct crypto_tfm *tfm);
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+			  const u8 *src, unsigned int slen,
+			  u8 *dst, unsigned int *dlen);
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+			    const u8 *src, unsigned int slen,
+			    u8 *dst, unsigned int *dlen);
 
 #endif /* __NX_842_H__ */

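To illustrate the constraints documentation above: a caller (or the crypto frontend in nx-842.c) brings a length into range roughly like this, mirroring the adjustments compress() makes (sketch only; the real code also bounce-buffers misaligned data):

#include <linux/kernel.h>
#include "nx-842.h"

/* Sketch: clamp a buffer length to a driver's nx842_constraints. */
static unsigned int nx842_adjust_len(unsigned int len,
				     const struct nx842_constraints *c)
{
	if (len % c->multiple)
		len = round_up(len, c->multiple);
	if (len < c->minimum)
		len = c->minimum;
	if (len > c->maximum)
		len = c->maximum;
	return len;
}
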
+ 70 - 81
drivers/crypto/nx/nx-aes-ccm.c

@@ -94,8 +94,6 @@ static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
 		return -EINVAL;
 	}
 
-	crypto_aead_crt(tfm)->authsize = authsize;
-
 	return 0;
 }
 
@@ -111,8 +109,6 @@ static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
 		return -EINVAL;
 	}
 
-	crypto_aead_crt(tfm)->authsize = authsize;
-
 	return 0;
 }
 
@@ -174,6 +170,7 @@ static int generate_pat(u8                   *iv,
 			struct nx_crypto_ctx *nx_ctx,
 			unsigned int          authsize,
 			unsigned int          nbytes,
+			unsigned int	      assoclen,
 			u8                   *out)
 {
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -200,16 +197,16 @@ static int generate_pat(u8                   *iv,
 	 * greater than 2^32.
 	 */
 
-	if (!req->assoclen) {
+	if (!assoclen) {
 		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
-	} else if (req->assoclen <= 14) {
+	} else if (assoclen <= 14) {
 		/* if associated data is 14 bytes or less, we do 1 GCM
 		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
 		 * which is fed in through the source buffers here */
 		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
 		b1 = nx_ctx->priv.ccm.iauth_tag;
-		iauth_len = req->assoclen;
-	} else if (req->assoclen <= 65280) {
+		iauth_len = assoclen;
+	} else if (assoclen <= 65280) {
 		/* if associated data is less than (2^16 - 2^8), we construct
 		 * B1 differently and feed in the associated data to a CCA
 		 * operation */
@@ -223,7 +220,7 @@ static int generate_pat(u8                   *iv,
 	}
 
 	/* generate B0 */
-	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
 	if (rc)
 		return rc;
 
@@ -233,22 +230,22 @@ static int generate_pat(u8                   *iv,
 	 */
 	if (b1) {
 		memset(b1, 0, 16);
-		if (req->assoclen <= 65280) {
-			*(u16 *)b1 = (u16)req->assoclen;
-			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+		if (assoclen <= 65280) {
+			*(u16 *)b1 = assoclen;
+			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
 					 iauth_len, SCATTERWALK_FROM_SG);
 		} else {
 			*(u16 *)b1 = (u16)(0xfffe);
-			*(u32 *)&b1[2] = (u32)req->assoclen;
-			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
+			*(u32 *)&b1[2] = assoclen;
+			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
 					 iauth_len, SCATTERWALK_FROM_SG);
 		}
 	}
 
 	/* now copy any remaining AAD to scatterlist and call nx... */
-	if (!req->assoclen) {
+	if (!assoclen) {
 		return rc;
-	} else if (req->assoclen <= 14) {
+	} else if (assoclen <= 14) {
 		unsigned int len = 16;
 
 		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
@@ -280,7 +277,7 @@ static int generate_pat(u8                   *iv,
 			return rc;
 
 		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
 
 	} else {
 		unsigned int processed = 0, to_process;
@@ -294,15 +291,15 @@ static int generate_pat(u8                   *iv,
 				nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 		do {
-			to_process = min_t(u32, req->assoclen - processed,
+			to_process = min_t(u32, assoclen - processed,
 					   nx_ctx->ap->databytelen);
 
 			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
 						    nx_ctx->ap->sglen,
-						    req->assoc, processed,
+						    req->src, processed,
 						    &to_process);
 
-			if ((to_process + processed) < req->assoclen) {
+			if ((to_process + processed) < assoclen) {
 				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
 					NX_FDM_INTERMEDIATE;
 			} else {
@@ -328,11 +325,10 @@ static int generate_pat(u8                   *iv,
 			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
 
 			atomic_inc(&(nx_ctx->stats->aes_ops));
-			atomic64_add(req->assoclen,
-					&(nx_ctx->stats->aes_bytes));
+			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
 
 			processed += to_process;
-		} while (processed < req->assoclen);
+		} while (processed < assoclen);
 
 		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
 	}
@@ -343,7 +339,8 @@ static int generate_pat(u8                   *iv,
 }
 
 static int ccm_nx_decrypt(struct aead_request   *req,
-			  struct blkcipher_desc *desc)
+			  struct blkcipher_desc *desc,
+			  unsigned int assoclen)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -360,10 +357,10 @@ static int ccm_nx_decrypt(struct aead_request   *req,
 
 	/* copy out the auth tag to compare with later */
 	scatterwalk_map_and_copy(priv->oauth_tag,
-				 req->src, nbytes, authsize,
+				 req->src, nbytes + req->assoclen, authsize,
 				 SCATTERWALK_FROM_SG);
 
-	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
 			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
 	if (rc)
 		goto out;
@@ -383,8 +380,8 @@ static int ccm_nx_decrypt(struct aead_request   *req,
 		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
 		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
-					&to_process, processed,
-					csbcpb->cpb.aes_ccm.iv_or_ctr);
+				       &to_process, processed + req->assoclen,
+				       csbcpb->cpb.aes_ccm.iv_or_ctr);
 		if (rc)
 			goto out;
 
@@ -420,7 +417,8 @@ out:
 }
 
 static int ccm_nx_encrypt(struct aead_request   *req,
-			  struct blkcipher_desc *desc)
+			  struct blkcipher_desc *desc,
+			  unsigned int assoclen)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -432,7 +430,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
 			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
 	if (rc)
 		goto out;
@@ -451,7 +449,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 
 		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
-					&to_process, processed,
+				       &to_process, processed + req->assoclen,
 				       csbcpb->cpb.aes_ccm.iv_or_ctr);
 		if (rc)
 			goto out;
@@ -483,7 +481,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 
 	/* copy out the auth tag */
 	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
-				 req->dst, nbytes, authsize,
+				 req->dst, nbytes + req->assoclen, authsize,
 				 SCATTERWALK_TO_SG);
 
 out:
@@ -503,9 +501,8 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
 	memcpy(iv + 4, req->iv, 8);
 
 	desc.info = iv;
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
-	return ccm_nx_encrypt(req, &desc);
+	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
 }
 
 static int ccm_aes_nx_encrypt(struct aead_request *req)
@@ -514,13 +511,12 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
 	int rc;
 
 	desc.info = req->iv;
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
 	rc = crypto_ccm_check_iv(desc.info);
 	if (rc)
 		return rc;
 
-	return ccm_nx_encrypt(req, &desc);
+	return ccm_nx_encrypt(req, &desc, req->assoclen);
 }
 
 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
@@ -535,9 +531,8 @@ static int ccm4309_aes_nx_decrypt(struct aead_request *req)
 	memcpy(iv + 4, req->iv, 8);
 
 	desc.info = iv;
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
-	return ccm_nx_decrypt(req, &desc);
+	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
 }
 
 static int ccm_aes_nx_decrypt(struct aead_request *req)
@@ -546,13 +541,12 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
 	int rc;
 
 	desc.info = req->iv;
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
 	rc = crypto_ccm_check_iv(desc.info);
 	if (rc)
 		return rc;
 
-	return ccm_nx_decrypt(req, &desc);
+	return ccm_nx_decrypt(req, &desc, req->assoclen);
 }
 
 /* tell the block cipher walk routines that this is a stream cipher by
@@ -560,47 +554,42 @@ static int ccm_aes_nx_decrypt(struct aead_request *req)
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
-struct crypto_alg nx_ccm_aes_alg = {
-	.cra_name        = "ccm(aes)",
-	.cra_driver_name = "ccm-aes-nx",
-	.cra_priority    = 300,
-	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
-			   CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize   = 1,
-	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-	.cra_type        = &crypto_aead_type,
-	.cra_module      = THIS_MODULE,
-	.cra_init        = nx_crypto_ctx_aes_ccm_init,
-	.cra_exit        = nx_crypto_ctx_exit,
-	.cra_aead = {
-		.ivsize      = AES_BLOCK_SIZE,
-		.maxauthsize = AES_BLOCK_SIZE,
-		.setkey      = ccm_aes_nx_set_key,
-		.setauthsize = ccm_aes_nx_setauthsize,
-		.encrypt     = ccm_aes_nx_encrypt,
-		.decrypt     = ccm_aes_nx_decrypt,
-	}
+struct aead_alg nx_ccm_aes_alg = {
+	.base = {
+		.cra_name        = "ccm(aes)",
+		.cra_driver_name = "ccm-aes-nx",
+		.cra_priority    = 300,
+		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize   = 1,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_module      = THIS_MODULE,
+	},
+	.init        = nx_crypto_ctx_aes_ccm_init,
+	.exit        = nx_crypto_ctx_aead_exit,
+	.ivsize      = AES_BLOCK_SIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.setkey      = ccm_aes_nx_set_key,
+	.setauthsize = ccm_aes_nx_setauthsize,
+	.encrypt     = ccm_aes_nx_encrypt,
+	.decrypt     = ccm_aes_nx_decrypt,
 };
 
-struct crypto_alg nx_ccm4309_aes_alg = {
-	.cra_name        = "rfc4309(ccm(aes))",
-	.cra_driver_name = "rfc4309-ccm-aes-nx",
-	.cra_priority    = 300,
-	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
-			   CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize   = 1,
-	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-	.cra_type        = &crypto_nivaead_type,
-	.cra_module      = THIS_MODULE,
-	.cra_init        = nx_crypto_ctx_aes_ccm_init,
-	.cra_exit        = nx_crypto_ctx_exit,
-	.cra_aead = {
-		.ivsize      = 8,
-		.maxauthsize = AES_BLOCK_SIZE,
-		.setkey      = ccm4309_aes_nx_set_key,
-		.setauthsize = ccm4309_aes_nx_setauthsize,
-		.encrypt     = ccm4309_aes_nx_encrypt,
-		.decrypt     = ccm4309_aes_nx_decrypt,
-		.geniv       = "seqiv",
-	}
+struct aead_alg nx_ccm4309_aes_alg = {
+	.base = {
+		.cra_name        = "rfc4309(ccm(aes))",
+		.cra_driver_name = "rfc4309-ccm-aes-nx",
+		.cra_priority    = 300,
+		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize   = 1,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_module      = THIS_MODULE,
+	},
+	.init        = nx_crypto_ctx_aes_ccm_init,
+	.exit        = nx_crypto_ctx_aead_exit,
+	.ivsize      = 8,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.setkey      = ccm4309_aes_nx_set_key,
+	.setauthsize = ccm4309_aes_nx_setauthsize,
+	.encrypt     = ccm4309_aes_nx_encrypt,
+	.decrypt     = ccm4309_aes_nx_decrypt,
 };

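For reference, the b1 construction in generate_pat() above is CCM's standard AAD length encoding (RFC 3610): lengths up to 65280 (2^16 - 2^8) are stored as a 2-byte big-endian field, larger ones as the 0xfffe marker followed by a 4-byte length. The driver gets away with plain stores because the NX hardware runs big-endian; a portable sketch of the same encoding:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: encode the CCM associated-data length at the start of B1,
 * returning how many bytes of the block were consumed. */
static unsigned int ccm_encode_aad_len(u8 *b1, u32 assoclen)
{
	if (assoclen <= 65280) {
		*(__be16 *)b1 = cpu_to_be16((u16)assoclen);
		return 2;
	}
	*(__be16 *)b1 = cpu_to_be16(0xfffe);
	*(__be32 *)(b1 + 2) = cpu_to_be32(assoclen);
	return 6;
}
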
+ 0 - 21
drivers/crypto/nx/nx-aes-ctr.c

@@ -144,27 +144,6 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
 	return ctr_aes_nx_crypt(desc, dst, src, nbytes);
 }
 
-struct crypto_alg nx_ctr_aes_alg = {
-	.cra_name        = "ctr(aes)",
-	.cra_driver_name = "ctr-aes-nx",
-	.cra_priority    = 300,
-	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize   = 1,
-	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-	.cra_type        = &crypto_blkcipher_type,
-	.cra_module      = THIS_MODULE,
-	.cra_init        = nx_crypto_ctx_aes_ctr_init,
-	.cra_exit        = nx_crypto_ctx_exit,
-	.cra_blkcipher = {
-		.min_keysize = AES_MIN_KEY_SIZE,
-		.max_keysize = AES_MAX_KEY_SIZE,
-		.ivsize      = AES_BLOCK_SIZE,
-		.setkey      = ctr_aes_nx_set_key,
-		.encrypt     = ctr_aes_nx_crypt,
-		.decrypt     = ctr_aes_nx_crypt,
-	}
-};
-
 struct crypto_alg nx_ctr3686_aes_alg = {
 	.cra_name        = "rfc3686(ctr(aes))",
 	.cra_driver_name = "rfc3686-ctr-aes-nx",

+ 38 - 26
drivers/crypto/nx/nx-aes-gcm.c

@@ -21,11 +21,9 @@
 
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <asm/vio.h>
 
 #include "nx_csbcpb.h"
@@ -36,7 +34,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
 			      const u8           *in_key,
 			      unsigned int        key_len)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
 
@@ -75,7 +73,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
 				  const u8           *in_key,
 				  unsigned int        key_len)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
 	char *nonce = nx_ctx->priv.gcm.nonce;
 	int rc;
 
@@ -110,13 +108,14 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
 
 static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
 		  struct aead_request   *req,
-		  u8                    *out)
+		  u8                    *out,
+		  unsigned int assoclen)
 {
 	int rc;
 	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
 	struct scatter_walk walk;
 	struct nx_sg *nx_sg = nx_ctx->in_sg;
-	unsigned int nbytes = req->assoclen;
+	unsigned int nbytes = assoclen;
 	unsigned int processed = 0, to_process;
 	unsigned int max_sg_len;
 
@@ -167,7 +166,7 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
 		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
 
 		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
 		processed += to_process;
 	} while (processed < nbytes);
@@ -177,13 +176,15 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
 	return rc;
 }
 
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
+		unsigned int assoclen)
 {
 	int rc;
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_sg *nx_sg;
-	unsigned int nbytes = req->assoclen;
+	unsigned int nbytes = assoclen;
 	unsigned int processed = 0, to_process;
 	unsigned int max_sg_len;
 
@@ -238,7 +239,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
 		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
 		processed += to_process;
 	} while (processed < nbytes);
@@ -253,7 +254,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
 		     int enc)
 {
 	int rc;
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	char out[AES_BLOCK_SIZE];
 	struct nx_sg *in_sg, *out_sg;
@@ -314,9 +316,11 @@ out:
 	return rc;
 }
 
-static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+			    unsigned int assoclen)
 {
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct blkcipher_desc desc;
 	struct blkcipher_desc desc;
@@ -332,10 +336,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
 
 	if (nbytes == 0) {
 	if (nbytes == 0) {
-		if (req->assoclen == 0)
+		if (assoclen == 0)
 			rc = gcm_empty(req, &desc, enc);
 			rc = gcm_empty(req, &desc, enc);
 		else
 		else
-			rc = gmac(req, &desc);
+			rc = gmac(req, &desc, assoclen);
 		if (rc)
 		if (rc)
 			goto out;
 			goto out;
 		else
 		else
@@ -343,9 +347,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	}
 	}
 
 
 	/* Process associated data */
 	/* Process associated data */
-	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
-	if (req->assoclen) {
-		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
+	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
+	if (assoclen) {
+		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
+			    assoclen);
 		if (rc)
 		if (rc)
 			goto out;
 			goto out;
 	}
 	}
@@ -363,7 +368,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 		to_process = nbytes - processed;
 		to_process = nbytes - processed;
 
 
 		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
 		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
-		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
 		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
 		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
 				       req->src, &to_process,
 				       req->src, &to_process,
 				       processed + req->assoclen,
 				       processed + req->assoclen,
@@ -430,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
 
 
 	memcpy(iv, req->iv, 12);
 	memcpy(iv, req->iv, 12);
 
 
-	return gcm_aes_nx_crypt(req, 1);
+	return gcm_aes_nx_crypt(req, 1, req->assoclen);
 }
 }
 
 
 static int gcm_aes_nx_decrypt(struct aead_request *req)
 static int gcm_aes_nx_decrypt(struct aead_request *req)
@@ -440,12 +444,13 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
 
 
 	memcpy(iv, req->iv, 12);
 	memcpy(iv, req->iv, 12);
 
 
-	return gcm_aes_nx_crypt(req, 0);
+	return gcm_aes_nx_crypt(req, 0, req->assoclen);
 }
 }
 
 
 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 {
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	char *iv = rctx->iv;
 	char *iv = rctx->iv;
 	char *nonce = nx_ctx->priv.gcm.nonce;
 	char *nonce = nx_ctx->priv.gcm.nonce;
@@ -453,12 +458,16 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
 	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 
 
-	return gcm_aes_nx_crypt(req, 1);
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
 }
 }
 
 
 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 {
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	char *iv = rctx->iv;
 	char *iv = rctx->iv;
 	char *nonce = nx_ctx->priv.gcm.nonce;
 	char *nonce = nx_ctx->priv.gcm.nonce;
@@ -466,7 +475,10 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
 	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 
 
-	return gcm_aes_nx_crypt(req, 0);
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
 }
 }
 
 
 /* tell the block cipher walk routines that this is a stream cipher by
 /* tell the block cipher walk routines that this is a stream cipher by

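The recurring `req->assoclen - 8` above is the new AEAD calling convention at work: for `rfc4106(gcm(aes))` the 8-byte explicit IV now sits between the associated data and the plaintext in `req->src` and is counted inside `assoclen`, so the driver both rejects requests shorter than 8 bytes of AD and strips the IV before programming the accelerator. A hypothetical caller-side sketch (names and layout helper are illustrative):

#include <crypto/aead.h>
#include <linux/scatterlist.h>

/* Caller view: src/dst are laid out as [ AD | 8-byte IV | text ], and
 * aead_request_set_ad() counts the explicit IV as associated data —
 * exactly what gcm4106_aes_nx_encrypt() subtracts again. */
static int submit_rfc4106_encrypt(struct aead_request *req,
				  struct scatterlist *src,
				  struct scatterlist *dst,
				  unsigned int adlen, unsigned int ptlen,
				  u8 *iv)
{
	aead_request_set_ad(req, adlen + 8);
	aead_request_set_crypt(req, src, dst, ptlen, iv);
	return crypto_aead_encrypt(req);
}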
+ 11 - 19
drivers/crypto/nx/nx.c

@@ -596,13 +596,9 @@ static int nx_register_algs(void)
 	if (rc)
 		goto out_unreg_ecb;
 
-	rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
-	if (rc)
-		goto out_unreg_cbc;
-
 	rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
 	if (rc)
-		goto out_unreg_ctr;
+		goto out_unreg_cbc;
 
 	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
 	if (rc)
@@ -612,11 +608,11 @@ static int nx_register_algs(void)
 	if (rc)
 		goto out_unreg_gcm;
 
-	rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 	if (rc)
 		goto out_unreg_gcm4106;
 
-	rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 	if (rc)
 		goto out_unreg_ccm;
 
@@ -644,17 +640,15 @@ out_unreg_s256:
 	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
 			    NX_PROPS_SHA256);
out_unreg_ccm4309:
-	nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
-	nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
 	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
 	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
 	nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
-out_unreg_ctr:
-	nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
 	nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
@@ -711,11 +705,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
 }
 
 /* entry points from the crypto tfm initializers */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
 {
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct nx_ccm_rctx));
-	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
+	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
 				  NX_MODE_AES_CCM);
 }
 
@@ -813,16 +806,15 @@ static int nx_remove(struct vio_dev *viodev)
 				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
 		nx_unregister_shash(&nx_shash_sha256_alg,
 				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
-		nx_unregister_alg(&nx_ccm4309_aes_alg,
-				  NX_FC_AES, NX_MODE_AES_CCM);
-		nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+		nx_unregister_aead(&nx_ccm4309_aes_alg,
+				   NX_FC_AES, NX_MODE_AES_CCM);
+		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 		nx_unregister_aead(&nx_gcm4106_aes_alg,
 				   NX_FC_AES, NX_MODE_AES_GCM);
 		nx_unregister_aead(&nx_gcm_aes_alg,
 				   NX_FC_AES, NX_MODE_AES_GCM);
 		nx_unregister_alg(&nx_ctr3686_aes_alg,
 				  NX_FC_AES, NX_MODE_AES_CTR);
-		nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
 		nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
 		nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
 	}

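The nx.c changes are a straight update of the driver's registration unwind ladder: labels run in reverse registration order, so removing the `ctr(aes)` registration deletes its label and retargets the preceding failure path one rung down. A stripped-down sketch of the pattern, with stub `register_*`/`unregister_*` names standing in for the real calls:

/* Stubs standing in for nx_register_alg()/nx_register_aead() and friends. */
static int register_cbc(void) { return 0; }
static int register_ctr3686(void) { return 0; }
static int register_gcm(void) { return 0; }
static void unregister_cbc(void) { }
static void unregister_ctr3686(void) { }

static int register_all(void)
{
	int rc;

	rc = register_cbc();
	if (rc)
		return rc;

	rc = register_ctr3686();
	if (rc)
		goto out_unreg_cbc;

	rc = register_gcm();
	if (rc)
		goto out_unreg_ctr3686;

	return 0;

out_unreg_ctr3686:
	unregister_ctr3686();	/* undo only what succeeded, newest first */
out_unreg_cbc:
	unregister_cbc();
	return rc;
}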
+ 5 - 4
drivers/crypto/nx/nx.h

@@ -149,8 +149,10 @@ struct nx_crypto_ctx {
 	} priv;
 };
 
+struct crypto_aead;
+
 /* prototypes */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
 int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
 int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
 int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
@@ -187,10 +189,9 @@ extern struct crypto_alg nx_cbc_aes_alg;
 extern struct crypto_alg nx_ecb_aes_alg;
 extern struct aead_alg nx_gcm_aes_alg;
 extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr_aes_alg;
 extern struct crypto_alg nx_ctr3686_aes_alg;
-extern struct crypto_alg nx_ccm_aes_alg;
-extern struct crypto_alg nx_ccm4309_aes_alg;
+extern struct aead_alg nx_ccm_aes_alg;
+extern struct aead_alg nx_ccm4309_aes_alg;
 extern struct shash_alg nx_shash_aes_xcbc_alg;
 extern struct shash_alg nx_shash_sha512_alg;
 extern struct shash_alg nx_shash_sha256_alg;

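The added `struct crypto_aead;` is the standard forward-declaration idiom: nx.h only ever names the type through a pointer, so declaring the tag is enough and no extra `#include` is forced on every user of the header. In miniature:

/* The tag declaration alone satisfies the compiler for pointer use. */
struct crypto_aead;

int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);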
+ 41 - 45
drivers/crypto/omap-aes.c

@@ -52,29 +52,30 @@
 #define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
 
 #define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
-#define AES_REG_CTRL_CTR_WIDTH_MASK	(3 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_32		(0 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_64		(1 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_96		(2 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_128		(3 << 7)
-#define AES_REG_CTRL_CTR		(1 << 6)
-#define AES_REG_CTRL_CBC		(1 << 5)
-#define AES_REG_CTRL_KEY_SIZE		(3 << 3)
-#define AES_REG_CTRL_DIRECTION		(1 << 2)
-#define AES_REG_CTRL_INPUT_READY	(1 << 1)
-#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)
+#define AES_REG_CTRL_CTR_WIDTH_MASK	GENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32	0
+#define AES_REG_CTRL_CTR_WIDTH_64	BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96	BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128	GENMASK(8, 7)
+#define AES_REG_CTRL_CTR		BIT(6)
+#define AES_REG_CTRL_CBC		BIT(5)
+#define AES_REG_CTRL_KEY_SIZE		GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION		BIT(2)
+#define AES_REG_CTRL_INPUT_READY	BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY	BIT(0)
+#define AES_REG_CTRL_MASK		GENMASK(24, 2)
 
 #define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))
 
 #define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)
 
 #define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
-#define AES_REG_MASK_SIDLE		(1 << 6)
-#define AES_REG_MASK_START		(1 << 5)
-#define AES_REG_MASK_DMA_OUT_EN		(1 << 3)
-#define AES_REG_MASK_DMA_IN_EN		(1 << 2)
-#define AES_REG_MASK_SOFTRESET		(1 << 1)
-#define AES_REG_AUTOIDLE		(1 << 0)
+#define AES_REG_MASK_SIDLE		BIT(6)
+#define AES_REG_MASK_START		BIT(5)
+#define AES_REG_MASK_DMA_OUT_EN		BIT(3)
+#define AES_REG_MASK_DMA_IN_EN		BIT(2)
+#define AES_REG_MASK_SOFTRESET		BIT(1)
+#define AES_REG_AUTOIDLE		BIT(0)
 
 #define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))
 
@@ -254,7 +255,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
 	unsigned int key32;
 	int i, err;
-	u32 val, mask = 0;
+	u32 val;
 
 	err = omap_aes_hw_init(dd);
 	if (err)
@@ -274,17 +275,13 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
 	if (dd->flags & FLAGS_CBC)
 		val |= AES_REG_CTRL_CBC;
-	if (dd->flags & FLAGS_CTR) {
+	if (dd->flags & FLAGS_CTR)
 		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
-		mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
-	}
+
 	if (dd->flags & FLAGS_ENCRYPT)
 		val |= AES_REG_CTRL_DIRECTION;
 
-	mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
-			AES_REG_CTRL_KEY_SIZE;
-
-	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
+	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
 
 	return 0;
 }
@@ -558,6 +555,9 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
 {
 	int len = 0;
 
+	if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
+		return -EINVAL;
+
 	while (sg) {
 		if (!IS_ALIGNED(sg->offset, 4))
 			return -1;
@@ -577,9 +577,10 @@ static int omap_aes_check_aligned(struct scatterlist *sg, int total)
 static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 {
 	void *buf_in, *buf_out;
-	int pages;
+	int pages, total;
 
-	pages = get_order(dd->total);
+	total = ALIGN(dd->total, AES_BLOCK_SIZE);
+	pages = get_order(total);
 
 	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
 	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
@@ -594,11 +595,11 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
 
 	sg_init_table(&dd->in_sgl, 1);
-	sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+	sg_set_buf(&dd->in_sgl, buf_in, total);
 	dd->in_sg = &dd->in_sgl;
 
 	sg_init_table(&dd->out_sgl, 1);
-	sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+	sg_set_buf(&dd->out_sgl, buf_out, total);
 	dd->out_sg = &dd->out_sgl;
 
 	return 0;
@@ -611,7 +612,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 	struct omap_aes_ctx *ctx;
 	struct omap_aes_reqctx *rctx;
 	unsigned long flags;
-	int err, ret = 0;
+	int err, ret = 0, len;
 
 	spin_lock_irqsave(&dd->lock, flags);
 	if (req)
@@ -650,8 +651,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 		dd->sgs_copied = 0;
 	}
 
-	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
-	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+	len = ALIGN(dd->total, AES_BLOCK_SIZE);
+	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
+	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
 	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
 
 	rctx = ablkcipher_request_ctx(req);
@@ -678,7 +680,7 @@ static void omap_aes_done_task(unsigned long data)
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
 	void *buf_in, *buf_out;
-	int pages;
+	int pages, len;
 
 	pr_debug("enter done_task\n");
 
@@ -697,7 +699,8 @@ static void omap_aes_done_task(unsigned long data)
 
 		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
 
-		pages = get_order(dd->total_save);
+		len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
+		pages = get_order(len);
 		free_pages((unsigned long)buf_in, pages);
 		free_pages((unsigned long)buf_out, pages);
 	}
@@ -726,11 +729,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 		  !!(mode & FLAGS_ENCRYPT),
 		  !!(mode & FLAGS_CBC));
 
-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-		pr_err("request size is not exact amount of AES blocks\n");
-		return -EINVAL;
-	}
-
 	dd = omap_aes_find_dev(ctx);
 	if (!dd)
 		return -ENODEV;
@@ -833,7 +831,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
 {
 	.cra_name		= "ecb(aes)",
 	.cra_driver_name	= "ecb-aes-omap",
-	.cra_priority		= 100,
+	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
 				  CRYPTO_ALG_ASYNC,
@@ -855,7 +853,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
 {
 	.cra_name		= "cbc(aes)",
 	.cra_driver_name	= "cbc-aes-omap",
-	.cra_priority		= 100,
+	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
 				  CRYPTO_ALG_ASYNC,
@@ -881,7 +879,7 @@ static struct crypto_alg algs_ctr[] = {
 {
 	.cra_name		= "ctr(aes)",
 	.cra_driver_name	= "ctr-aes-omap",
-	.cra_priority		= 100,
+	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
 				  CRYPTO_ALG_ASYNC,
@@ -1046,9 +1044,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
 			}
 		}
 
-		dd->total -= AES_BLOCK_SIZE;
-
-		BUG_ON(dd->total < 0);
+		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
 
 		/* Clear IRQ status */
 		status &= ~AES_REG_IRQ_DATA_OUT;

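Most of the omap-aes.c churn is the mechanical `(x << n)` to `BIT()`/`GENMASK()` conversion; the constants are unchanged, and only `AES_REG_CTRL_MASK` is genuinely new, folding the previously hand-built write mask into one definition. The expansions, for reference (in kernels of this vintage both macros come from <linux/bitops.h>; the `EXAMPLE_*` names are illustrative):

#include <linux/bitops.h>	/* BIT() and GENMASK() */

/* Same values as the open-coded shifts they replace: */
#define EXAMPLE_CTR_WIDTH_MASK	GENMASK(8, 7)	/* bits 8..7 set == (3 << 7) */
#define EXAMPLE_CTR		BIT(6)		/* == (1 << 6), unsigned long */
#define EXAMPLE_KEY_SIZE	GENMASK(4, 3)	/* == (3 << 3) */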
+ 310 - 367
drivers/crypto/picoxcell_crypto.c

@@ -99,11 +99,16 @@ struct spacc_req {
 	dma_addr_t			src_addr, dst_addr;
 	struct spacc_ddt		*src_ddt, *dst_ddt;
 	void				(*complete)(struct spacc_req *req);
+};
 
-	/* AEAD specific bits. */
-	u8				*giv;
-	size_t				giv_len;
-	dma_addr_t			giv_pa;
+struct spacc_aead {
+	unsigned long			ctrl_default;
+	unsigned long			type;
+	struct aead_alg			alg;
+	struct spacc_engine		*engine;
+	struct list_head		entry;
+	int				key_offs;
+	int				iv_offs;
 };
 
 struct spacc_engine {
@@ -121,6 +126,9 @@ struct spacc_engine {
 	struct spacc_alg		*algs;
 	unsigned			num_algs;
 	struct list_head		registered_algs;
+	struct spacc_aead		*aeads;
+	unsigned			num_aeads;
+	struct list_head		registered_aeads;
 	size_t				cipher_pg_sz;
 	size_t				hash_pg_sz;
 	const char			*name;
@@ -174,8 +182,6 @@ struct spacc_aead_ctx {
 	u8				cipher_key_len;
 	u8				hash_key_len;
 	struct crypto_aead		*sw_cipher;
-	size_t				auth_size;
-	u8				salt[AES_BLOCK_SIZE];
 };
 
 static int spacc_ablk_submit(struct spacc_req *req);
@@ -185,6 +191,11 @@ static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
 	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
 }
 
+static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
+{
+	return container_of(alg, struct spacc_aead, alg);
+}
+
 static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
 {
 	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
@@ -310,120 +321,117 @@ out:
 	return NULL;
 }
 
-static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
+static int spacc_aead_make_ddts(struct aead_request *areq)
 {
-	struct aead_request *areq = container_of(req->req, struct aead_request,
-						 base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct spacc_req *req = aead_request_ctx(areq);
 	struct spacc_engine *engine = req->engine;
 	struct spacc_ddt *src_ddt, *dst_ddt;
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
-	unsigned nents = sg_count(areq->src, areq->cryptlen);
 	unsigned total;
-	dma_addr_t iv_addr;
+	unsigned int src_nents, dst_nents;
 	struct scatterlist *cur;
-	int i, dst_ents, src_ents, assoc_ents;
-	u8 *iv = giv ? giv : areq->iv;
+	int i, dst_ents, src_ents;
+
+	total = areq->assoclen + areq->cryptlen;
+	if (req->is_encrypt)
+		total += crypto_aead_authsize(aead);
+
+	src_nents = sg_count(areq->src, total);
+	if (src_nents + 1 > MAX_DDT_LEN)
+		return -E2BIG;
+
+	dst_nents = 0;
+	if (areq->src != areq->dst) {
+		dst_nents = sg_count(areq->dst, total);
+		if (src_nents + 1 > MAX_DDT_LEN)
+			return -E2BIG;
+	}
 
 	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
 	if (!src_ddt)
-		return -ENOMEM;
+		goto err;
 
 	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
-	if (!dst_ddt) {
-		dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
-		return -ENOMEM;
-	}
+	if (!dst_ddt)
+		goto err_free_src;
 
 	req->src_ddt = src_ddt;
 	req->dst_ddt = dst_ddt;
 
-	assoc_ents = dma_map_sg(engine->dev, areq->assoc,
-		sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
-	if (areq->src != areq->dst) {
-		src_ents = dma_map_sg(engine->dev, areq->src, nents,
+	if (dst_nents) {
+		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
 				      DMA_TO_DEVICE);
-		dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
+		if (!src_ents)
+			goto err_free_dst;
+
+		dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
 				      DMA_FROM_DEVICE);
+
+		if (!dst_ents) {
+			dma_unmap_sg(engine->dev, areq->src, src_nents,
+				     DMA_TO_DEVICE);
+			goto err_free_dst;
+		}
 	} else {
-		src_ents = dma_map_sg(engine->dev, areq->src, nents,
+		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
 				      DMA_BIDIRECTIONAL);
-		dst_ents = 0;
+		if (!src_ents)
+			goto err_free_dst;
+		dst_ents = src_ents;
 	}
 
 	/*
-	 * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
-	 * formed by the crypto block and sent as the ESP IV for IPSEC.
+	 * Now map in the payload for the source and destination and terminate
+	 * with the NULL pointers.
 	 */
-	iv_addr = dma_map_single(engine->dev, iv, ivsize,
-				 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
-	req->giv_pa = iv_addr;
+	for_each_sg(areq->src, cur, src_ents, i)
+		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
 
-	/*
-	 * Map the associated data. For decryption we don't copy the
-	 * associated data.
-	 */
-	total = areq->assoclen;
-	for_each_sg(areq->assoc, cur, assoc_ents, i) {
+	/* For decryption we need to skip the associated data. */
+	total = req->is_encrypt ? 0 : areq->assoclen;
+	for_each_sg(areq->dst, cur, dst_ents, i) {
 		unsigned len = sg_dma_len(cur);
 
-		if (len > total)
-			len = total;
-
-		total -= len;
+		if (len <= total) {
+			total -= len;
+			continue;
+		}
 
-		ddt_set(src_ddt++, sg_dma_address(cur), len);
-		if (req->is_encrypt)
-			ddt_set(dst_ddt++, sg_dma_address(cur), len);
+		ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
 	}
-	ddt_set(src_ddt++, iv_addr, ivsize);
-
-	if (giv || req->is_encrypt)
-		ddt_set(dst_ddt++, iv_addr, ivsize);
-
-	/*
-	 * Now map in the payload for the source and destination and terminate
-	 * with the NULL pointers.
-	 */
-	for_each_sg(areq->src, cur, src_ents, i) {
-		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
-		if (areq->src == areq->dst)
-			ddt_set(dst_ddt++, sg_dma_address(cur),
-				sg_dma_len(cur));
-	}
-
-	for_each_sg(areq->dst, cur, dst_ents, i)
-		ddt_set(dst_ddt++, sg_dma_address(cur),
-			sg_dma_len(cur));
 
 	ddt_set(src_ddt, 0, 0);
 	ddt_set(dst_ddt, 0, 0);
 
 	return 0;
+
+err_free_dst:
+	dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
+err_free_src:
+	dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
+err:
+	return -ENOMEM;
 }
 
 static void spacc_aead_free_ddts(struct spacc_req *req)
 {
 	struct aead_request *areq = container_of(req->req, struct aead_request,
 						 base);
-	struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
-	struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	unsigned total = areq->assoclen + areq->cryptlen +
+			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
+	struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
 	struct spacc_engine *engine = aead_ctx->generic.engine;
-	unsigned ivsize = alg->alg.cra_aead.ivsize;
-	unsigned nents = sg_count(areq->src, areq->cryptlen);
+	unsigned nents = sg_count(areq->src, total);
 
 	if (areq->src != areq->dst) {
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
 		dma_unmap_sg(engine->dev, areq->dst,
-			     sg_count(areq->dst, areq->cryptlen),
+			     sg_count(areq->dst, total),
 			     DMA_FROM_DEVICE);
 	} else
 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
 
-	dma_unmap_sg(engine->dev, areq->assoc,
-		     sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
-
-	dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
-
 	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
 	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
 }
@@ -438,65 +446,22 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
 	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
 }
 
-/*
- * Set key for a DES operation in an AEAD cipher. This also performs weak key
- * checking if required.
- */
-static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
-				 unsigned int len)
-{
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-	u32 tmp[DES_EXPKEY_WORDS];
-
-	if (unlikely(!des_ekey(tmp, key)) &&
-	    (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
-
-	memcpy(ctx->cipher_key, key, len);
-	ctx->cipher_key_len = len;
-
-	return 0;
-}
-
-/* Set the key for the AES block cipher component of the AEAD transform. */
-static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
-				 unsigned int len)
-{
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	/*
-	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
-	 * request for any other size (192 bits) then we need to do a software
-	 * fallback.
-	 */
-	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
-		/*
-		 * Set the fallback transform to use the same request flags as
-		 * the hardware transform.
-		 */
-		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-		ctx->sw_cipher->base.crt_flags |=
-			tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-		return crypto_aead_setkey(ctx->sw_cipher, key, len);
-	}
-
-	memcpy(ctx->cipher_key, key, len);
-	ctx->cipher_key_len = len;
-
-	return 0;
-}
-
 static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
 	struct crypto_authenc_keys keys;
-	int err = -EINVAL;
+	int err;
+
+	crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
+					      CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
+				   CRYPTO_TFM_RES_MASK);
+	if (err)
+		return err;
 
 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;
@@ -507,14 +472,8 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	if (keys.authkeylen > sizeof(ctx->hash_ctx))
 		goto badkey;
 
-	if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
-	    SPA_CTRL_CIPH_ALG_AES)
-		err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
-	else
-		err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
-
-	if (err)
-		goto badkey;
+	memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
+	ctx->cipher_key_len = keys.enckeylen;
 
 	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
 	ctx->hash_key_len = keys.authkeylen;
@@ -531,9 +490,7 @@ static int spacc_aead_setauthsize(struct crypto_aead *tfm,
 {
 	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
 
-	ctx->auth_size = authsize;
-
-	return 0;
+	return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
 }
 
 /*
@@ -541,15 +498,13 @@
  * be completed in hardware because the hardware may not support certain key
  * sizes. In these cases we need to complete the request in software.
  */
-static int spacc_aead_need_fallback(struct spacc_req *req)
+static int spacc_aead_need_fallback(struct aead_request *aead_req)
 {
-	struct aead_request *aead_req;
-	struct crypto_tfm *tfm = req->req->tfm;
-	struct crypto_alg *alg = req->req->tfm->__crt_alg;
-	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
 
-	aead_req = container_of(req->req, struct aead_request, base);
 	/*
 	 * If we have a non-supported key-length, then we need to do a
 	 * software fallback.
 	 */
@@ -568,22 +523,17 @@ static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
 {
 	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
 	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
-	int err;
+	struct aead_request *subreq = aead_request_ctx(req);
 
-	if (ctx->sw_cipher) {
-		/*
-		 * Change the request to use the software fallback transform,
-		 * and once the ciphering has completed, put the old transform
-		 * back into the request.
-		 */
-		aead_request_set_tfm(req, ctx->sw_cipher);
-		err = is_encrypt ? crypto_aead_encrypt(req) :
-		    crypto_aead_decrypt(req);
-		aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
-	} else
-		err = -EINVAL;
+	aead_request_set_tfm(subreq, ctx->sw_cipher);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+			       req->iv);
+	aead_request_set_ad(subreq, req->assoclen);
 
-	return err;
+	return is_encrypt ? crypto_aead_encrypt(subreq) :
+			    crypto_aead_decrypt(subreq);
 }
 
 static void spacc_aead_complete(struct spacc_req *req)
@@ -594,18 +544,19 @@ static void spacc_aead_complete(struct spacc_req *req)
 
 static int spacc_aead_submit(struct spacc_req *req)
 {
-	struct crypto_tfm *tfm = req->req->tfm;
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_alg *alg = req->req->tfm->__crt_alg;
-	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
-	struct spacc_engine *engine = ctx->generic.engine;
-	u32 ctrl, proc_len, assoc_len;
 	struct aead_request *aead_req =
 		container_of(req->req, struct aead_request, base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	unsigned int authsize = crypto_aead_authsize(aead);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+	struct spacc_engine *engine = ctx->generic.engine;
+	u32 ctrl, proc_len, assoc_len;
 
 	req->result = -EINPROGRESS;
 	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
-		ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
+		ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
 		ctx->hash_ctx, ctx->hash_key_len);
 
 	/* Set the source and destination DDT pointers. */
@@ -616,26 +567,16 @@ static int spacc_aead_submit(struct spacc_req *req)
 	assoc_len = aead_req->assoclen;
 	proc_len = aead_req->cryptlen + assoc_len;
 
-	/*
-	 * If we aren't generating an IV, then we need to include the IV in the
-	 * associated data so that it is included in the hash.
-	 */
-	if (!req->giv) {
-		assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
-		proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
-	} else
-		proc_len += req->giv_len;
-
 	/*
 	 * If we are decrypting, we need to take the length of the ICV out of
 	 * the processing length.
 	 */
 	if (!req->is_encrypt)
-		proc_len -= ctx->auth_size;
+		proc_len -= authsize;
 
 	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
 	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
-	writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
+	writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
 	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
 	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
 
@@ -674,32 +615,29 @@ static void spacc_push(struct spacc_engine *engine)
 /*
  * Setup an AEAD request for processing. This will configure the engine, load
  * the context and then start the packet processing.
- *
- * @giv Pointer to destination address for a generated IV. If the
- *	request does not need to generate an IV then this should be set to NULL.
  */
-static int spacc_aead_setup(struct aead_request *req, u8 *giv,
+static int spacc_aead_setup(struct aead_request *req,
 			    unsigned alg_type, bool is_encrypt)
 {
-	struct crypto_alg *alg = req->base.tfm->__crt_alg;
-	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_engine *engine = to_spacc_aead(alg)->engine;
 	struct spacc_req *dev_req = aead_request_ctx(req);
-	int err = -EINPROGRESS;
+	int err;
 	unsigned long flags;
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
 
-	dev_req->giv		= giv;
-	dev_req->giv_len	= ivsize;
 	dev_req->req		= &req->base;
 	dev_req->is_encrypt	= is_encrypt;
 	dev_req->result		= -EBUSY;
 	dev_req->engine		= engine;
 	dev_req->complete	= spacc_aead_complete;
 
-	if (unlikely(spacc_aead_need_fallback(dev_req)))
+	if (unlikely(spacc_aead_need_fallback(req) ||
+		     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
 		return spacc_aead_do_fallback(req, alg_type, is_encrypt);
 
-	spacc_aead_make_ddts(dev_req, dev_req->giv);
+	if (err)
+		goto out;
 
 	err = -EINPROGRESS;
 	spin_lock_irqsave(&engine->hw_lock, flags);
@@ -728,70 +666,44 @@ out:
 static int spacc_aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+	struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
 
-	return spacc_aead_setup(req, NULL, alg->type, 1);
-}
-
-static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
-	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	size_t ivsize = crypto_aead_ivsize(tfm);
-	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
-	unsigned len;
-	__be64 seq;
-
-	memcpy(req->areq.iv, ctx->salt, ivsize);
-	len = ivsize;
-	if (ivsize > sizeof(u64)) {
-		memset(req->giv, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + ivsize - len, &seq, len);
-
-	return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
+	return spacc_aead_setup(req, alg->type, 1);
 }
 
 static int spacc_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+	struct spacc_aead  *alg = to_spacc_aead(crypto_aead_alg(aead));
 
-	return spacc_aead_setup(req, NULL, alg->type, 0);
+	return spacc_aead_setup(req, alg->type, 0);
 }
 
 /*
  * Initialise a new AEAD context. This is responsible for allocating the
  * fallback cipher and initialising the context.
 */
-static int spacc_aead_cra_init(struct crypto_tfm *tfm)
+static int spacc_aead_cra_init(struct crypto_aead *tfm)
 {
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
 	struct spacc_engine *engine = spacc_alg->engine;
 
 	ctx->generic.flags = spacc_alg->type;
 	ctx->generic.engine = engine;
-	ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
-					   CRYPTO_ALG_ASYNC |
+	ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
 					   CRYPTO_ALG_NEED_FALLBACK);
-	if (IS_ERR(ctx->sw_cipher)) {
-		dev_warn(engine->dev, "failed to allocate fallback for %s\n",
-			 alg->cra_name);
-		ctx->sw_cipher = NULL;
-	}
+	if (IS_ERR(ctx->sw_cipher))
+		return PTR_ERR(ctx->sw_cipher);
 	ctx->generic.key_offs = spacc_alg->key_offs;
 	ctx->generic.iv_offs = spacc_alg->iv_offs;
 
-	get_random_bytes(ctx->salt, sizeof(ctx->salt));
-
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct spacc_req));
+	crypto_aead_set_reqsize(
		tfm,
+		max(sizeof(struct spacc_req),
+		    sizeof(struct aead_request) +
+		    crypto_aead_reqsize(ctx->sw_cipher)));
 
 	return 0;
 }
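The `crypto_aead_set_reqsize()` call above is what lets `spacc_aead_do_fallback()` carve its software subrequest out of the hardware request context instead of allocating on the fast path: the per-request area must be big enough for either use. A sketch of the same contract with illustrative `my_*` names:

#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct my_hw_req {
	int is_encrypt;			/* illustrative hardware bookkeeping */
};

struct my_aead_ctx {
	struct crypto_aead *sw_cipher;
};

static int my_aead_init(struct crypto_aead *tfm)
{
	struct my_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->sw_cipher = crypto_alloc_aead(crypto_aead_alg(tfm)->base.cra_name,
					   0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->sw_cipher))
		return PTR_ERR(ctx->sw_cipher);

	/* Room for the hardware descriptor *or* a nested software request:
	 * both live in aead_request_ctx(req). */
	crypto_aead_set_reqsize(tfm,
				max(sizeof(struct my_hw_req),
				    sizeof(struct aead_request) +
				    crypto_aead_reqsize(ctx->sw_cipher)));
	return 0;
}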
@@ -800,13 +712,11 @@
  * Destructor for an AEAD context. This is called when the transform is freed
  * and must free the fallback cipher.
  */
-static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
+static void spacc_aead_cra_exit(struct crypto_aead *tfm)
 {
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
-	if (ctx->sw_cipher)
-		crypto_free_aead(ctx->sw_cipher);
-	ctx->sw_cipher = NULL;
+	crypto_free_aead(ctx->sw_cipher);
 }
 
 /*
@@ -1458,180 +1368,188 @@ static struct spacc_alg ipsec_engine_algs[] = {
 			.cra_exit = spacc_ablk_cra_exit,
 		},
 	},
+};
+
+static struct spacc_aead ipsec_engine_aeads[] = {
 	{
-		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
-				SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_SHA |
+				SPA_CTRL_HASH_MODE_HMAC,
 		.key_offs = 0,
 		.iv_offs = AES_MAX_KEY_SIZE,
 		.alg = {
-			.cra_name = "authenc(hmac(sha1),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA1_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
-		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
 				SPA_CTRL_HASH_ALG_SHA256 |
 				SPA_CTRL_HASH_MODE_HMAC,
 		.key_offs = 0,
 		.iv_offs = AES_MAX_KEY_SIZE,
 		.alg = {
-			.cra_name = "authenc(hmac(sha256),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA256_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
 		.key_offs = 0,
 		.iv_offs = AES_MAX_KEY_SIZE,
-		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
-				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_MD5 |
+				SPA_CTRL_HASH_MODE_HMAC,
 		.alg = {
-			.cra_name = "authenc(hmac(md5),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = MD5_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
 		.key_offs = DES_BLOCK_SIZE,
 		.iv_offs = 0,
-		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
-				SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_SHA |
+				SPA_CTRL_HASH_MODE_HMAC,
 		.alg = {
-			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA1_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
 		.key_offs = DES_BLOCK_SIZE,
 		.iv_offs = 0,
-		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
 				SPA_CTRL_HASH_ALG_SHA256 |
 				SPA_CTRL_HASH_MODE_HMAC,
 		.alg = {
-			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA256_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
 		.key_offs = DES_BLOCK_SIZE,
 		.iv_offs = 0,
-		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
-				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_MD5 |
+				SPA_CTRL_HASH_MODE_HMAC,
 		.alg = {
-			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = MD5_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 };
@@ -1707,6 +1625,8 @@ static int spacc_probe(struct platform_device *pdev)
 		engine->fifo_sz		= SPACC_CRYPTO_IPSEC_FIFO_SZ;
 		engine->fifo_sz		= SPACC_CRYPTO_IPSEC_FIFO_SZ;
 		engine->algs		= ipsec_engine_algs;
 		engine->algs		= ipsec_engine_algs;
 		engine->num_algs	= ARRAY_SIZE(ipsec_engine_algs);
 		engine->num_algs	= ARRAY_SIZE(ipsec_engine_algs);
+		engine->aeads		= ipsec_engine_aeads;
+		engine->num_aeads	= ARRAY_SIZE(ipsec_engine_aeads);
 	} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
 	} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
 		engine->max_ctxs	= SPACC_CRYPTO_L2_MAX_CTXS;
 		engine->max_ctxs	= SPACC_CRYPTO_L2_MAX_CTXS;
 		engine->cipher_pg_sz	= SPACC_CRYPTO_L2_CIPHER_PG_SZ;
 		engine->cipher_pg_sz	= SPACC_CRYPTO_L2_CIPHER_PG_SZ;
@@ -1815,17 +1735,40 @@ static int spacc_probe(struct platform_device *pdev)
 				engine->algs[i].alg.cra_name);
 				engine->algs[i].alg.cra_name);
 	}
 	}
 
 
+	INIT_LIST_HEAD(&engine->registered_aeads);
+	for (i = 0; i < engine->num_aeads; ++i) {
+		engine->aeads[i].engine = engine;
+		err = crypto_register_aead(&engine->aeads[i].alg);
+		if (!err) {
+			list_add_tail(&engine->aeads[i].entry,
+				      &engine->registered_aeads);
+			ret = 0;
+		}
+		if (err)
+			dev_err(engine->dev, "failed to register alg \"%s\"\n",
+				engine->aeads[i].alg.base.cra_name);
+		else
+			dev_dbg(engine->dev, "registered alg \"%s\"\n",
+				engine->aeads[i].alg.base.cra_name);
+	}
+
 	return ret;
 	return ret;
 }
 }
 
 
 static int spacc_remove(struct platform_device *pdev)
 static int spacc_remove(struct platform_device *pdev)
 {
 {
+	struct spacc_aead *aead, *an;
 	struct spacc_alg *alg, *next;
 	struct spacc_alg *alg, *next;
 	struct spacc_engine *engine = platform_get_drvdata(pdev);
 	struct spacc_engine *engine = platform_get_drvdata(pdev);
 
 
 	del_timer_sync(&engine->packet_timeout);
 	del_timer_sync(&engine->packet_timeout);
 	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
 	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
 
 
+	list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
+		list_del(&aead->entry);
+		crypto_unregister_aead(&aead->alg);
+	}
+
 	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
 	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
 		list_del(&alg->entry);
 		list_del(&alg->entry);
 		crypto_unregister_alg(&alg->alg);
 		crypto_unregister_alg(&alg->alg);
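
(Editorial sketch.) The spacc_probe()/spacc_remove() hunks above follow a "register what you can" pattern: each struct aead_alg is registered individually, the successes are collected on a list, and teardown unwinds only that list. A minimal sketch of the same pattern, using hypothetical demo_* names rather than the driver's own code:

#include <linux/errno.h>
#include <linux/list.h>
#include <crypto/aead.h>

struct demo_aead {
	struct aead_alg alg;	/* new-style AEAD descriptor */
	struct list_head entry;	/* links successfully registered entries */
};

static LIST_HEAD(demo_registered);

static int demo_register_aeads(struct demo_aead *aeads, int n)
{
	int i, ret = -ENODEV;

	for (i = 0; i < n; i++) {
		if (!crypto_register_aead(&aeads[i].alg)) {
			list_add_tail(&aeads[i].entry, &demo_registered);
			ret = 0;	/* at least one algorithm is usable */
		}
	}
	return ret;
}

static void demo_unregister_aeads(void)
{
	struct demo_aead *a, *tmp;

	list_for_each_entry_safe(a, tmp, &demo_registered, entry) {
		list_del(&a->entry);
		crypto_unregister_aead(&a->alg);
	}
}

Tracking only the successful registrations means a partial failure still leaves the device usable, and remove() never unregisters an algorithm that was never added.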

+ 15 - 0
drivers/crypto/qat/Kconfig

@@ -3,11 +3,13 @@ config CRYPTO_DEV_QAT
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_AKCIPHER
 	select CRYPTO_HMAC
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
 	select FW_LOADER
+	select ASN1
 
 config CRYPTO_DEV_QAT_DH895xCC
 	tristate "Support for Intel(R) DH895xCC"
@@ -19,3 +21,16 @@ config CRYPTO_DEV_QAT_DH895xCC
 
 	  To compile this as a module, choose M here: the module
 	  will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+	tristate "Support for Intel(R) DH895xCC Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+
+	help
+	  Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_dh895xccvf.

+ 1 - 0
drivers/crypto/qat/Makefile

@@ -1,2 +1,3 @@
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/

+ 1 - 0
drivers/crypto/qat/qat_common/.gitignore

@@ -0,0 +1 @@
+*-asn1.[ch]

+ 8 - 0
drivers/crypto/qat/qat_common/Makefile

@@ -1,3 +1,6 @@
+$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h
+clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h
+
 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
 intel_qat-objs := adf_cfg.o \
 	adf_ctl_drv.o \
@@ -6,9 +9,14 @@ intel_qat-objs := adf_cfg.o \
 	adf_accel_engine.o \
 	adf_aer.o \
 	adf_transport.o \
+	adf_admin.o \
+	adf_hw_arbiter.o \
 	qat_crypto.o \
 	qat_algs.o \
+	qat_rsakey-asn1.o \
+	qat_asym_algs.o \
 	qat_uclo.o \
 	qat_hal.o
 
 intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o

+ 43 - 3
drivers/crypto/qat/qat_common/adf_accel_devices.h

@@ -46,13 +46,17 @@
 */
 #ifndef ADF_ACCEL_DEVICES_H_
 #define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/ratelimit.h>
 #include "adf_cfg_common.h"
 
 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
@@ -79,6 +83,7 @@ struct adf_bar {
 struct adf_accel_msix {
 	struct msix_entry *entries;
 	char **names;
+	u32 num_entries;
 } __packed;
 
 struct adf_accel_pci {
@@ -99,6 +104,7 @@ enum dev_sku_info {
 	DEV_SKU_2,
 	DEV_SKU_3,
 	DEV_SKU_4,
+	DEV_SKU_VF,
 	DEV_SKU_UNKNOWN,
 };
 
@@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info)
 		return "SKU3";
 	case DEV_SKU_4:
 		return "SKU4";
+	case DEV_SKU_VF:
+		return "SKUVF";
 	case DEV_SKU_UNKNOWN:
 	default:
 		break;
@@ -135,23 +143,29 @@ struct adf_hw_device_data {
 	struct adf_hw_device_class *dev_class;
 	uint32_t (*get_accel_mask)(uint32_t fuse);
 	uint32_t (*get_ae_mask)(uint32_t fuse);
+	uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
 	uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
 	uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
 	uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
 	uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+	uint32_t (*get_pf2vf_offset)(uint32_t i);
+	uint32_t (*get_vintmsk_offset)(uint32_t i);
 	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
-	void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
-	void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
 	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
 	void (*free_irq)(struct adf_accel_dev *accel_dev);
 	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
 	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
 	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
 	int (*init_arb)(struct adf_accel_dev *accel_dev);
 	void (*exit_arb)(struct adf_accel_dev *accel_dev);
+	void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
+				const uint32_t **cfg);
+	void (*disable_iov)(struct adf_accel_dev *accel_dev);
 	void (*enable_ints)(struct adf_accel_dev *accel_dev);
+	int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
 	const char *fw_name;
-	uint32_t pci_dev_id;
+	const char *fw_mmp_name;
 	uint32_t fuses;
 	uint32_t accel_capabilities_mask;
 	uint16_t accel_mask;
@@ -163,6 +177,7 @@ struct adf_hw_device_data {
 	uint8_t num_accel;
 	uint8_t num_logical_accel;
 	uint8_t num_engines;
+	uint8_t min_iov_compat_ver;
 } __packed;
 
 /* CSR write macro */
@@ -184,6 +199,16 @@ struct icp_qat_fw_loader_handle;
 struct adf_fw_loader_data {
 	struct icp_qat_fw_loader_handle *fw_loader;
 	const struct firmware *uof_fw;
+	const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+	struct adf_accel_dev *accel_dev;
+	struct tasklet_struct vf2pf_bh_tasklet;
+	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+	struct ratelimit_state vf2pf_ratelimit;
+	u32 vf_nr;
+	bool init;
 };
 
 struct adf_accel_dev {
@@ -199,6 +224,21 @@ struct adf_accel_dev {
 	struct list_head list;
 	struct module *owner;
 	struct adf_accel_pci accel_pci_dev;
+	union {
+		struct {
+			/* vf_info is non-zero when SR-IOV is init'ed */
+			struct adf_accel_vf_info *vf_info;
+		} pf;
+		struct {
+			char *irq_name;
+			struct tasklet_struct pf2vf_bh_tasklet;
+			struct mutex vf2pf_lock; /* protect CSR access */
+			struct completion iov_msg_completion;
+			uint8_t compatible;
+			uint8_t pf_version;
+		} vf;
+	};
+	bool is_vf;
 	uint8_t accel_id;
 } __packed;
 #endif
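
(Editorial note.) The struct adf_accel_dev hunk above overlays PF-only and VF-only state in an anonymous union discriminated by is_vf, so the two views alias the same storage. A hedged illustration of the access rule this implies; the demo_* helper is hypothetical, not part of the patch, and assumes the header above:

static bool demo_dev_has_active_vfs(struct adf_accel_dev *accel_dev)
{
	/* pf.vf_info aliases vf.irq_name and friends, so check is_vf
	 * first; on a PF, vf_info becomes non-NULL once SR-IOV has
	 * been enabled (e.g. via adf_sriov_configure()). */
	return !accel_dev->is_vf && accel_dev->pf.vf_info;
}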

+ 37 - 5
drivers/crypto/qat/qat_common/adf_accel_engine.c

@@ -55,24 +55,36 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
 {
 	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-	void *uof_addr;
-	uint32_t uof_size;
+	void *uof_addr, *mmp_addr;
+	u32 uof_size, mmp_size;
 
+	if (!hw_device->fw_name)
+		return 0;
+
+	if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+			hw_device->fw_mmp_name);
+		return -EFAULT;
+	}
 	if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
 			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
-		dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
+		dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
 			hw_device->fw_name);
-		return -EFAULT;
+		goto out_err;
 	}
 
 	uof_size = loader_data->uof_fw->size;
 	uof_addr = (void *)loader_data->uof_fw->data;
+	mmp_size = loader_data->mmp_fw->size;
+	mmp_addr = (void *)loader_data->mmp_fw->data;
+	qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
 	if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
 		dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
 		goto out_err;
 	}
 	if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
-		dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
+		dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
 		goto out_err;
 	}
 	return 0;
@@ -85,11 +97,17 @@ out_err:
 void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
 {
 	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return;
 
 	qat_uclo_del_uof_obj(loader_data->fw_loader);
 	qat_hal_deinit(loader_data->fw_loader);
 	release_firmware(loader_data->uof_fw);
+	release_firmware(loader_data->mmp_fw);
 	loader_data->uof_fw = NULL;
+	loader_data->mmp_fw = NULL;
 	loader_data->fw_loader = NULL;
 }
 
@@ -99,6 +117,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
 
+	if (!hw_data->fw_name)
+		return 0;
+
 	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
 		if (hw_data->ae_mask & (1 << ae)) {
 			qat_hal_start(loader_data->fw_loader, ae, 0xFF);
@@ -117,6 +138,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
 
+	if (!hw_data->fw_name)
+		return 0;
+
 	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
 		if (hw_data->ae_mask & (1 << ae)) {
 			qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
@@ -143,6 +167,10 @@ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
 int adf_ae_init(struct adf_accel_dev *accel_dev)
 {
 	struct adf_fw_loader_data *loader_data;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return 0;
 
 	loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
 	if (!loader_data)
@@ -166,6 +194,10 @@ int adf_ae_init(struct adf_accel_dev *accel_dev)
 int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
 {
 	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return 0;
 
 	qat_hal_deinit(loader_data->fw_loader);
 	kfree(accel_dev->fw_loader);
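
(Editorial sketch.) adf_ae_fw_load() above now requests two images -- the MMP image first, then the UOF image -- and the new "if (!hw_device->fw_name) return 0;" guards let VF devices, which ship no firmware of their own, pass through every firmware routine as a no-op. The two-request error handling, reduced to a sketch with hypothetical demo_* names:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>

static int demo_load_two_images(struct device *dev,
				const struct firmware **mmp,
				const struct firmware **uof,
				const char *mmp_name, const char *uof_name)
{
	if (!uof_name)			/* VF: nothing to load */
		return 0;

	if (request_firmware(mmp, mmp_name, dev))
		return -EFAULT;		/* first request failed: nothing to undo */

	if (request_firmware(uof, uof_name, dev)) {
		release_firmware(*mmp);	/* undo the request that did succeed */
		return -EFAULT;
	}
	return 0;			/* caller releases both on teardown */
}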

+ 290 - 0
drivers/crypto/qat/qat_common/adf_admin.c

@@ -0,0 +1,290 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_init_admin.h"
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+
+static const u8 const_tab[1024] = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+	dma_addr_t phy_addr;
+	dma_addr_t const_tbl_addr;
+	void *virt_addr;
+	void __iomem *mailbox_addr;
+	struct mutex lock;	/* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+				  void *in, void *out)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+	int offset = ae * ADF_ADMINMSG_LEN * 2;
+	void __iomem *mailbox = admin->mailbox_addr;
+	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+	int times, received;
+
+	mutex_lock(&admin->lock);
+
+	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+		mutex_unlock(&admin->lock);
+		return -EAGAIN;
+	}
+
+	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+	ADF_CSR_WR(mailbox, mb_offset, 1);
+	received = 0;
+	for (times = 0; times < 50; times++) {
+		msleep(20);
+		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+			received = 1;
+			break;
+		}
+	}
+	if (received)
+		memcpy(out, admin->virt_addr + offset +
+		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+	else
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send admin msg to accelerator\n");
+
+	mutex_unlock(&admin->lock);
+	return received ? 0 : -EFAULT;
+}
+
+static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct icp_qat_fw_init_admin_req req;
+	struct icp_qat_fw_init_admin_resp resp;
+	int i;
+
+	memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+	req.init_admin_cmd_id = cmd;
+
+	if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
+		req.init_cfg_sz = 1024;
+		req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+	}
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+		if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+		    resp.init_resp_hdr.status)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends admin init message to the FW
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+	int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+
+	if (ret)
+		return ret;
+	return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *csr = pmisc->virt_addr;
+	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+	u64 reg_val;
+
+	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+			     dev_to_node(&GET_DEV(accel_dev)));
+	if (!admin)
+		return -ENOMEM;
+	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+					       &admin->phy_addr, GFP_KERNEL);
+	if (!admin->virt_addr) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+		kfree(admin);
+		return -ENOMEM;
+	}
+
+	admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
+					       (void *) const_tab, 1024,
+					       DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+				       admin->const_tbl_addr))) {
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+		kfree(admin);
+		return -ENOMEM;
+	}
+	reg_val = (u64)admin->phy_addr;
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+	mutex_init(&admin->lock);
+	admin->mailbox_addr = mailbox;
+	accel_dev->admin = admin;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+
+	if (!admin)
+		return;
+
+	if (admin->virt_addr)
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+
+	dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
+			 DMA_TO_DEVICE);
+	mutex_destroy(&admin->lock);
+	kfree(admin);
+	accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
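
(Editorial sketch.) The heart of the new adf_admin.c is adf_put_admin_msg_sync(): each accelerator engine owns a pair of 32-byte message slots (request at "offset", response at offset + ADF_ADMINMSG_LEN) plus a one-word mailbox CSR used as an ownership flag. The host stages the request, writes 1 to the mailbox, then polls up to 50 x 20 ms for the firmware to clear it before reading the response. The bare handshake, with demo_csr_read()/demo_csr_write() as hypothetical stand-ins for the ADF_CSR_RD/ADF_CSR_WR macros:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

/* Stand-ins for ADF_CSR_RD/ADF_CSR_WR; assumed provided elsewhere. */
extern u32 demo_csr_read(void __iomem *csr);
extern void demo_csr_write(void __iomem *csr, u32 val);

static int demo_admin_xfer(void __iomem *mbox, void *req_slot,
			   void *resp_slot, const void *req, void *resp,
			   size_t len)
{
	int tries;

	if (demo_csr_read(mbox) == 1)	/* slot still owned by firmware */
		return -EAGAIN;

	memcpy(req_slot, req, len);	/* 1. stage the request */
	demo_csr_write(mbox, 1);	/* 2. hand the slot to firmware */

	for (tries = 0; tries < 50; tries++) {	/* 3. poll ~1 s total */
		msleep(20);
		if (demo_csr_read(mbox) == 0) {
			memcpy(resp, resp_slot, len);	/* 4. fetch reply */
			return 0;
		}
	}
	return -EFAULT;			/* firmware never answered */
}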

+ 4 - 1
drivers/crypto/qat/qat_common/adf_aer.c

@@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
 	dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
 	dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
 		 accel_dev->accel_id);
 		 accel_dev->accel_id);
 
 
+	if (!parent)
+		parent = pdev;
+
 	if (!pci_wait_for_pending_transaction(pdev))
 	if (!pci_wait_for_pending_transaction(pdev))
 		dev_info(&GET_DEV(accel_dev),
 		dev_info(&GET_DEV(accel_dev),
 			 "Transaction still in progress. Proceeding\n");
 			 "Transaction still in progress. Proceeding\n");
@@ -206,7 +209,7 @@ static struct pci_error_handlers adf_err_handler = {
  * QAT acceleration device accel_dev.
  * QAT acceleration device accel_dev.
  * To be used by QAT device specific drivers.
  * To be used by QAT device specific drivers.
  *
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
  */
 int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
 int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
 {
 {

+ 6 - 3
drivers/crypto/qat/qat_common/adf_cfg.c

@@ -123,7 +123,7 @@ static const struct file_operations qat_dev_cfg_fops = {
  * The table stores device specific config values.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
 {
@@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
 {
 	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
 
+	if (!dev_cfg_data)
+		return;
+
 	down_write(&dev_cfg_data->lock);
 	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
 	up_write(&dev_cfg_data->lock);
@@ -276,7 +279,7 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
  * in the given acceleration device
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
 				const char *section_name,
@@ -327,7 +330,7 @@ EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
  * will be stored.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
 */
 int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
 {

+ 2 - 1
drivers/crypto/qat/qat_common/adf_cfg_common.h

@@ -60,7 +60,7 @@
 #define ADF_CFG_NO_DEVICE 0xFF
 #define ADF_CFG_AFFINITY_WHATEVER 0xFF
 #define MAX_DEVICE_NAME_SIZE 32
-#define ADF_MAX_DEVICES 32
+#define ADF_MAX_DEVICES (32 * 32)
 
 enum adf_cfg_val_type {
 	ADF_DEC,
@@ -71,6 +71,7 @@ enum adf_cfg_val_type {
 enum adf_device_type {
 	DEV_UNKNOWN = 0,
 	DEV_DH895XCC,
+	DEV_DH895XCCVF,
 };
 
 struct adf_dev_status_info {

+ 48 - 5
drivers/crypto/qat/qat_common/adf_common_drv.h

@@ -54,8 +54,8 @@
 #include "icp_qat_hal.h"
 #include "icp_qat_hal.h"
 
 
 #define ADF_MAJOR_VERSION	0
 #define ADF_MAJOR_VERSION	0
-#define ADF_MINOR_VERSION	1
-#define ADF_BUILD_VERSION	3
+#define ADF_MINOR_VERSION	2
+#define ADF_BUILD_VERSION	0
 #define ADF_DRV_VERSION		__stringify(ADF_MAJOR_VERSION) "." \
 #define ADF_DRV_VERSION		__stringify(ADF_MAJOR_VERSION) "." \
 				__stringify(ADF_MINOR_VERSION) "." \
 				__stringify(ADF_MINOR_VERSION) "." \
 				__stringify(ADF_BUILD_VERSION)
 				__stringify(ADF_BUILD_VERSION)
@@ -91,9 +91,13 @@ struct service_hndl {
 	unsigned long start_status;
 	unsigned long start_status;
 	char *name;
 	char *name;
 	struct list_head list;
 	struct list_head list;
-	int admin;
 };
 };
 
 
+static inline int get_current_node(void)
+{
+	return topology_physical_package_id(smp_processor_id());
+}
+
 int adf_service_register(struct service_hndl *service);
 int adf_service_register(struct service_hndl *service);
 int adf_service_unregister(struct service_hndl *service);
 int adf_service_unregister(struct service_hndl *service);
 
 
@@ -102,13 +106,24 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
 int adf_dev_stop(struct adf_accel_dev *accel_dev);
 int adf_dev_stop(struct adf_accel_dev *accel_dev);
 void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 
 
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
 int adf_ctl_dev_register(void);
 int adf_ctl_dev_register(void);
 void adf_ctl_dev_unregister(void);
 void adf_ctl_dev_unregister(void);
 int adf_processes_dev_register(void);
 int adf_processes_dev_register(void);
 void adf_processes_dev_unregister(void);
 void adf_processes_dev_unregister(void);
 
 
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf);
 struct list_head *adf_devmgr_get_head(void);
 struct list_head *adf_devmgr_get_head(void);
 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
 struct adf_accel_dev *adf_devmgr_get_first(void);
 struct adf_accel_dev *adf_devmgr_get_first(void);
@@ -130,6 +145,12 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
 void adf_disable_aer(struct adf_accel_dev *accel_dev);
 void adf_disable_aer(struct adf_accel_dev *accel_dev);
 int adf_init_aer(void);
 int adf_init_aer(void);
 void adf_exit_aer(void);
 void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
 
 
 int adf_dev_get(struct adf_accel_dev *accel_dev);
 int adf_dev_get(struct adf_accel_dev *accel_dev);
 void adf_dev_put(struct adf_accel_dev *accel_dev);
 void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -141,10 +162,13 @@ int qat_crypto_unregister(void);
 struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
 struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
 void qat_crypto_put_instance(struct qat_crypto_instance *inst);
 void qat_crypto_put_instance(struct qat_crypto_instance *inst);
 void qat_alg_callback(void *resp);
 void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
 int qat_algs_init(void);
 int qat_algs_init(void);
 void qat_algs_exit(void);
 void qat_algs_exit(void);
 int qat_algs_register(void);
 int qat_algs_register(void);
 int qat_algs_unregister(void);
 int qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
 
 
 int qat_hal_init(struct adf_accel_dev *accel_dev);
 int qat_hal_init(struct adf_accel_dev *accel_dev);
 void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
 void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
@@ -196,4 +220,23 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
 			 void *addr_ptr, int mem_size);
 			 void *addr_ptr, int mem_size);
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+			void *addr_ptr, int mem_size);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				  uint32_t vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 uint32_t vf_mask);
+#else
+static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	return 0;
+}
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
 #endif
 #endif
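
(Editorial note.) Note the #if defined(CONFIG_PCI_IOV) block added to adf_common_drv.h above: when SR-IOV support is compiled out, adf_sriov_configure() and adf_disable_sriov() collapse into static inline no-ops, so callers never need conditional compilation of their own. The same idiom in template form (CONFIG_DEMO_FEATURE and the demo_* names are hypothetical):

struct demo_dev;	/* opaque to users of the header */

#ifdef CONFIG_DEMO_FEATURE
int demo_feature_setup(struct demo_dev *dev);
void demo_feature_teardown(struct demo_dev *dev);
#else
static inline int demo_feature_setup(struct demo_dev *dev)
{
	return 0;	/* feature absent: succeed as a no-op */
}

static inline void demo_feature_teardown(struct demo_dev *dev)
{
}
#endif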

+ 3 - 3
drivers/crypto/qat/qat_common/adf_ctl_drv.c

@@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
 	}
 
 	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
-	if (!accel_dev) {
-		pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+	if (!accel_dev)
 		return -ENODEV;
-	}
+
 	hw_data = accel_dev->hw_device;
 	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
 	dev_info.num_ae = hw_data->get_num_aes(hw_data);
@@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
 	adf_exit_aer();
 	qat_crypto_unregister();
 	qat_algs_exit();
+	adf_clean_vf_map(false);
 	mutex_destroy(&adf_ctl_lock);
 }
 

+ 268 - 18
drivers/crypto/qat/qat_common/adf_dev_mgr.c

@@ -50,21 +50,125 @@
 #include "adf_common_drv.h"
 #include "adf_common_drv.h"
 
 
 static LIST_HEAD(accel_table);
 static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
 static DEFINE_MUTEX(table_lock);
 static DEFINE_MUTEX(table_lock);
 static uint32_t num_devices;
 static uint32_t num_devices;
 
 
+struct vf_id_map {
+	u32 bdf;
+	u32 id;
+	u32 fake_id;
+	bool attached;
+	struct list_head list;
+};
+
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+	struct list_head *itr;
+
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+
+		if (ptr->bdf == bdf)
+			return ptr;
+	}
+	return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+	struct list_head *itr;
+
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+		if (ptr->fake_id == fake)
+			return ptr->id;
+	}
+	return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Cleans VF id mappings
+ *
+ * Function cleans internal ids for virtual functions.
+ * @vf: flag indicating whether mappings are cleaned
+ *	for vfs only or for vfs and pfs
+ */
+void adf_clean_vf_map(bool vf)
+{
+	struct vf_id_map *map;
+	struct list_head *ptr, *tmp;
+
+	mutex_lock(&table_lock);
+	list_for_each_safe(ptr, tmp, &vfs_table) {
+		map = list_entry(ptr, struct vf_id_map, list);
+		if (map->bdf != -1)
+			num_devices--;
+
+		if (vf && map->bdf == -1)
+			continue;
+
+		list_del(ptr);
+		kfree(map);
+	}
+	mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data:  Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+	struct adf_hw_device_class *class = hw_data->dev_class;
+	struct list_head *itr;
+	int i = 0;
+
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr->hw_device->dev_class == class)
+			ptr->hw_device->instance_id = i++;
+
+		if (i == class->instances)
+				break;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
 /**
  * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
  * @accel_dev:  Pointer to acceleration device.
+ * @pf:		Corresponding PF if the accel_dev is a VF
  *
  * Function adds acceleration device to the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf)
 {
 	struct list_head *itr;
+	int ret = 0;
 
 	if (num_devices == ADF_MAX_DEVICES) {
 		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
@@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
 	}
 
 	mutex_lock(&table_lock);
-	list_for_each(itr, &accel_table) {
-		struct adf_accel_dev *ptr =
+	atomic_set(&accel_dev->ref_count, 0);
+
+	/* PF on host or VF on guest */
+	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		struct vf_id_map *map;
+
+		list_for_each(itr, &accel_table) {
+			struct adf_accel_dev *ptr =
 				list_entry(itr, struct adf_accel_dev, list);
 
-		if (ptr == accel_dev) {
-			mutex_unlock(&table_lock);
-			return -EEXIST;
+			if (ptr == accel_dev) {
+				ret = -EEXIST;
+				goto unlock;
+			}
 		}
+
+		list_add_tail(&accel_dev->list, &accel_table);
+		accel_dev->accel_id = num_devices++;
+
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+		map->bdf = ~0;
+		map->id = accel_dev->accel_id;
+		map->fake_id = map->id;
+		map->attached = true;
+		list_add_tail(&map->list, &vfs_table);
+	} else if (accel_dev->is_vf && pf) {
+		/* VF on host */
+		struct adf_accel_vf_info *vf_info;
+		struct vf_id_map *map;
+
+		vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
+
+		map = adf_find_vf(adf_get_vf_num(accel_dev));
+		if (map) {
+			struct vf_id_map *next;
+
+			accel_dev->accel_id = map->id;
+			list_add_tail(&accel_dev->list, &accel_table);
+			map->fake_id++;
+			map->attached = true;
+			next = list_next_entry(map, list);
+			while (next && &next->list != &vfs_table) {
+				next->fake_id++;
+				next = list_next_entry(next, list);
+			}
+
+			ret = 0;
+			goto unlock;
+		}
+
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+
+		accel_dev->accel_id = num_devices++;
+		list_add_tail(&accel_dev->list, &accel_table);
+		map->bdf = adf_get_vf_num(accel_dev);
+		map->id = accel_dev->accel_id;
+		map->fake_id = map->id;
+		map->attached = true;
+		list_add_tail(&map->list, &vfs_table);
 	}
-	atomic_set(&accel_dev->ref_count, 0);
-	list_add_tail(&accel_dev->list, &accel_table);
-	accel_dev->accel_id = num_devices++;
+unlock:
 	mutex_unlock(&table_lock);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
 
@@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void)
 /**
  * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
  * @accel_dev:  Pointer to acceleration device.
+ * @pf:		Corresponding PF if the accel_dev is a VF
  *
  * Function removes acceleration device from the acceleration framework.
  * To be used by QAT device specific drivers.
  *
  * Return: void
  */
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf)
 {
 	mutex_lock(&table_lock);
+	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		num_devices--;
+	} else if (accel_dev->is_vf && pf) {
+		struct vf_id_map *map, *next;
+
+		map = adf_find_vf(adf_get_vf_num(accel_dev));
+		if (!map) {
+			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+			goto unlock;
+		}
+		map->fake_id--;
+		map->attached = false;
+		next = list_next_entry(map, list);
+		while (next && &next->list != &vfs_table) {
+			next->fake_id--;
+			next = list_next_entry(next, list);
+		}
+	}
+unlock:
 	list_del(&accel_dev->list);
-	num_devices--;
 	mutex_unlock(&table_lock);
 }
 EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
@@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
 {
 	struct list_head *itr;
+	int real_id;
 
 	mutex_lock(&table_lock);
+	real_id = adf_get_vf_real_id(id);
+	if (real_id < 0)
+		goto unlock;
+
+	id = real_id;
+
 	list_for_each(itr, &accel_table) {
 	list_for_each(itr, &accel_table) {
 		struct adf_accel_dev *ptr =
 				list_entry(itr, struct adf_accel_dev, list);
-
 		if (ptr->accel_id == id) {
 			mutex_unlock(&table_lock);
 			return ptr;
 		}
 	}
 	mutex_unlock(&table_lock);
 	mutex_unlock(&table_lock);
 	return NULL;
 }
 	return -ENODEV;
 	return -ENODEV;
 }
 
-void adf_devmgr_get_num_dev(uint32_t *num)
+static int adf_get_num_dettached_vfs(void)
 {
 	struct list_head *itr;
+	int vfs = 0;
 
-	*num = 0;
-	list_for_each(itr, &accel_table) {
-		(*num)++;
+	mutex_lock(&table_lock);
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+		if (ptr->bdf != ~0 && !ptr->attached)
+			vfs++;
 	}
+	return vfs;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+	*num = num_devices - adf_get_num_dettached_vfs();
 }
 }
 
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
 int adf_dev_in_use(struct adf_accel_dev *accel_dev)
 int adf_dev_in_use(struct adf_accel_dev *accel_dev)
 {
 	return atomic_read(&accel_dev->ref_count) != 0;
 }
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
 
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount and if this is the first time
+ * incrementing it during this period the accel_dev is in use,
+ * increment the module refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, EFAULT when fail to bump module refcount
+ */
 int adf_dev_get(struct adf_accel_dev *accel_dev)
 int adf_dev_get(struct adf_accel_dev *accel_dev)
 {
 	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
@@ -202,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev)
 			return -EFAULT;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(adf_dev_get);
 
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount and if this is the last time
+ * decrementing it during this period the accel_dev is in use,
+ * decrement the module refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
 void adf_dev_put(struct adf_accel_dev *accel_dev)
 {
 	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
 		module_put(accel_dev->owner);
 }
+EXPORT_SYMBOL_GPL(adf_dev_put);
 
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
 int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
 int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
 {
 	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
 }
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
 
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
 int adf_dev_started(struct adf_accel_dev *accel_dev)
 int adf_dev_started(struct adf_accel_dev *accel_dev)
 {
 	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
 }

+ 23 - 14
drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c → drivers/crypto/qat/qat_common/adf_hw_arbiter.c

@@ -44,9 +44,8 @@
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-#include <adf_accel_devices.h>
-#include <adf_transport_internal.h>
-#include "adf_drv.h"
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
 
 #define ADF_ARB_NUM 4
 #define ADF_ARB_REQ_RING_NUM 8
 #define ADF_ARB_RO_EN_OFFSET 0x090
 #define ADF_ARB_WQCFG_OFFSET 0x100
 #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
-#define ADF_ARB_WRK_2_SER_MAP 10
 #define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C

 #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
@@ -89,10 +87,11 @@

 int adf_init_arb(struct adf_accel_dev *accel_dev)
 {
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
-	uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
-	uint32_t arb, i;
-	const uint32_t *thd_2_arb_cfg;
+	u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+	u32 arb, i;
+	const u32 *thd_2_arb_cfg;

 	/* Service arb configured for 32 bytes responses and
 	 * ring flow control check enabled. */
@@ -109,30 +108,39 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
 		WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);

 	/* Setup worker queue registers */
-	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+	for (i = 0; i < hw_data->num_engines; i++)
 		WRITE_CSR_ARB_WQCFG(csr, i, i);

 	/* Map worker threads to service arbiters */
-	adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
+	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);

 	if (!thd_2_arb_cfg)
 		return -EFAULT;

-	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+	for (i = 0; i < hw_data->num_engines; i++)
 		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));

 	return 0;
 }
-
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+/**
+ * adf_update_ring_arb() - update ring arbitration register
+ * @ring: Pointer to ring data.
+ *
+ * Function enables or disables rings for/from arbitration.
+ */
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
 {
 	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
 				   ring->bank->bank_number,
 				   ring->bank->ring_mask & 0xFF);
 }
+EXPORT_SYMBOL_GPL(adf_update_ring_arb);

 void adf_exit_arb(struct adf_accel_dev *accel_dev)
 {
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	void __iomem *csr;
 	unsigned int i;

@@ -146,14 +154,15 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
 		WRITE_CSR_ARB_SARCONFIG(csr, i, 0);

 	/* Shutdown work queue */
-	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+	for (i = 0; i < hw_data->num_engines; i++)
 		WRITE_CSR_ARB_WQCFG(csr, i, 0);

 	/* Unmap worker threads from service arbiters */
-	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+	for (i = 0; i < hw_data->num_engines; i++)
 		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);

 	/* Disable arbitration on all rings */
 	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
 		WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
 }
+EXPORT_SYMBOL_GPL(adf_exit_arb);
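
With ADF_ARB_WRK_2_SER_MAP removed, the arbiter code above sizes its loops from hw_data->num_engines and fetches the thread-to-arbiter map through hw_data->get_arb_mapping. A sketch of what a device-specific driver now supplies; the map values are placeholders, not real device data:

    /* Illustrative only: the two adf_hw_device_data fields the generic
     * arbiter code consumes after this change. */
    static const u32 my_thrd_to_arb_map[] = {
    	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
    	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
    };

    static void my_get_arb_mapping(struct adf_accel_dev *accel_dev,
    			       const u32 **arb_map_config)
    {
    	*arb_map_config = my_thrd_to_arb_map;	/* one word per engine */
    }

    /* ...in the device's hw_data setup: */
    hw_data->num_engines = ARRAY_SIZE(my_thrd_to_arb_map);
    hw_data->get_arb_mapping = my_get_arb_mapping;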

+ 18 - 86
drivers/crypto/qat/qat_common/adf_init.c

@@ -69,7 +69,7 @@ static void adf_service_add(struct service_hndl *service)
  * Function adds the acceleration service to the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_service_register(struct service_hndl *service)
 {
@@ -94,7 +94,7 @@ static void adf_service_remove(struct service_hndl *service)
  * Function removes the acceleration service from the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_service_unregister(struct service_hndl *service)
 {
@@ -114,7 +114,7 @@ EXPORT_SYMBOL_GPL(adf_service_unregister);
  * Initialize the ring data structures and the admin comms and arbitration
  * services.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_init(struct adf_accel_dev *accel_dev)
 {
@@ -177,20 +177,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
 	 */
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to initialise service %s\n",
-				service->name);
-			return -EFAULT;
-		}
-		set_bit(accel_dev->accel_id, &service->init_status);
-	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to initialise service %s\n",
@@ -201,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
 	}

 	hw_data->enable_error_correction(accel_dev);
+	hw_data->enable_vf2pf_comms(accel_dev);

 	return 0;
 }
@@ -214,10 +201,11 @@ EXPORT_SYMBOL_GPL(adf_dev_init);
  * is ready to be used.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_start(struct adf_accel_dev *accel_dev)
 {
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	struct service_hndl *service;
 	struct list_head *list_itr;

@@ -229,22 +217,13 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
 	}
 	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to start service %s\n",
-				service->name);
-			return -EFAULT;
-		}
-		set_bit(accel_dev->accel_id, &service->start_status);
+	if (hw_data->send_admin_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+		return -EFAULT;
 	}
+
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to start service %s\n",
@@ -257,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
 	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

-	if (qat_algs_register()) {
+	if (!list_empty(&accel_dev->crypto_list) &&
+	    (qat_algs_register() || qat_asym_algs_register())) {
 		dev_err(&GET_DEV(accel_dev),
 			"Failed to register crypto algs\n");
 		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -276,7 +256,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
  * is shutting down.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_stop(struct adf_accel_dev *accel_dev)
 {
@@ -292,14 +272,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

-	if (qat_algs_unregister())
+	if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister())
 		dev_err(&GET_DEV(accel_dev),
 			"Failed to unregister crypto algs\n");

+	if (!list_empty(&accel_dev->crypto_list))
+		qat_asym_algs_unregister();
+
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
 		if (!test_bit(accel_dev->accel_id, &service->start_status))
 			continue;
 		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
@@ -310,19 +291,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
 			clear_bit(accel_dev->accel_id, &service->start_status);
 		}
 	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
-		if (!test_bit(accel_dev->accel_id, &service->start_status))
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_STOP))
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to shutdown service %s\n",
-				service->name);
-		else
-			clear_bit(accel_dev->accel_id, &service->start_status);
-	}

 	if (wait)
 		msleep(100);
@@ -373,21 +341,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)

 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
-		if (!test_bit(accel_dev->accel_id, &service->init_status))
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to shutdown service %s\n",
-				service->name);
-		else
-			clear_bit(accel_dev->accel_id, &service->init_status);
-	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
 		if (!test_bit(accel_dev->accel_id, &service->init_status))
 			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
@@ -413,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
 	if (hw_data->exit_admin_comms)
 		hw_data->exit_admin_comms(accel_dev);

+	hw_data->disable_iov(accel_dev);
 	adf_cleanup_etr_data(accel_dev);
 }
 EXPORT_SYMBOL_GPL(adf_dev_shutdown);
@@ -424,17 +378,6 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)

 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to restart service %s.\n",
-				service->name);
-	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to restart service %s.\n",
@@ -450,17 +393,6 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)

 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to restart service %s.\n",
-				service->name);
-	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to restart service %s.\n",

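Since the ->admin flag is gone from every loop in this file, each event (init, start, stop, shutdown, restarting, restarted) is now delivered to all registered services in a single pass. A hedged sketch of a service written against this flow, with illustrative names:

    /* Illustrative service handler: one callback now sees every event. */
    static int my_service_event(struct adf_accel_dev *accel_dev,
    			    enum adf_event event)
    {
    	switch (event) {
    	case ADF_EVENT_INIT:		/* allocate per-device state */
    	case ADF_EVENT_START:		/* begin servicing requests */
    	case ADF_EVENT_STOP:		/* quiesce outstanding work */
    	case ADF_EVENT_SHUTDOWN:	/* free per-device state */
    	default:
    		return 0;		/* non-zero reports failure */
    	}
    }

    static struct service_hndl my_service = {
    	.event_hld = my_service_event,
    	.name = "my_service",
    };

    /* registered once, e.g. at module init:
     *	adf_service_register(&my_service);
     */
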
+ 438 - 0
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c

@@ -0,0 +1,438 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET	0x3A000
+#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
+
+/**
+ * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function enables PF to VF interrupts
+ */
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
+
+/**
+ * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PF to VF interrupts
+ */
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+
+/**
+ * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
+ * @accel_dev:	Pointer to acceleration device.
+ * @vf_mask:	Mask of VFs for which to disable VF2PF interrupts.
+ *
+ * Function disables VF to PF interrupts
+ */
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
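
As a worked example of the mask arithmetic above: for vf_mask = 0x7 (VFs 1 through 3), the lower-half macro expands to (0x7 & 0xFFFF) << 9 = 0xE00, i.e. ERRMSK3 bits 9-11; enabling clears those bits, disabling sets them.

    /* Worked example (illustrative value) */
    u32 bits = ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(0x7);	/* == 0xE00 */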
+
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+	u32 val, pf2vf_offset, count = 0;
+	u32 local_in_use_mask, local_in_use_pattern;
+	u32 remote_in_use_mask, remote_in_use_pattern;
+	struct mutex *lock;	/* lock preventing concurrent access of CSR */
+	u32 int_bit;
+	int ret = 0;
+
+	if (accel_dev->is_vf) {
+		pf2vf_offset = hw_data->get_pf2vf_offset(0);
+		lock = &accel_dev->vf.vf2pf_lock;
+		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		int_bit = ADF_VF2PF_INT;
+	} else {
+		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		int_bit = ADF_PF2VF_INT;
+	}
+
+	mutex_lock(lock);
+
+	/* Check if PF2VF CSR is in use by remote function */
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote function\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Attempt to get ownership of PF2VF CSR */
+	msg &= ~local_in_use_mask;
+	msg |= local_in_use_pattern;
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+	/* Wait in case remote func is also attempting to get ownership */
+	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & local_in_use_mask) != local_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote - collision detected\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
+	 * remain in the PF2VF CSR for all writes including ACK from remote
+	 * until this local function relinquishes the CSR.  Send the message
+	 * by interrupting the remote.
+	 */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+	/* Wait for confirmation from remote func that it got the message */
+	do {
+		msleep(ADF_IOV_MSG_ACK_DELAY);
+		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+	if (val & int_bit) {
+		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+		val &= ~int_bit;
+		ret = -EIO;
+	}
+
+	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+	mutex_unlock(lock);
+	return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev:  Pointer to acceleration device.
+ * @msg:	Message to send
+ * @vf_nr:	VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	u32 count = 0;
+	int ret;
+
+	do {
+		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+		if (ret)
+			msleep(ADF_IOV_MSG_RETRY_DELAY);
+	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
+
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int bar_id = hw_data->get_misc_bar_id(hw_data);
+	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+	/* Read message from the VF */
+	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+	/* To ACK, clear the VF2PFINT bit */
+	msg &= ~ADF_VF2PF_INT;
+	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+		/* Ignore legacy non-system (non-kernel) VF2PF messages */
+		goto err;
+
+	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+		{
+		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			  ADF_PF2VF_MSGTYPE_SHIFT) |
+			 (ADF_PFVF_COMPATIBILITY_VERSION <<
+			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+		dev_dbg(&GET_DEV(accel_dev),
+			"Compatibility Version Request from VF%d vers=%u\n",
+			vf_nr + 1, vf_compat_ver);
+
+		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) incompatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) compat with PF (vers %d) unkn.\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else {
+			dev_dbg(&GET_DEV(accel_dev),
+				"VF (vers %d) compatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		}
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Legacy VersionRequest received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			  ADF_PF2VF_MSGTYPE_SHIFT) |
+			 (ADF_PFVF_COMPATIBILITY_VERSION <<
+			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+		resp |= ADF_PF2VF_VF_COMPATIBLE <<
+			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		/* Set legacy major and minor version num */
+		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
+			1 << ADF_PF2VF_MINORVERSION_SHIFT;
+		break;
+	case ADF_VF2PF_MSGTYPE_INIT:
+		{
+		dev_dbg(&GET_DEV(accel_dev),
+			"Init message received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		vf_info->init = true;
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+		{
+		dev_dbg(&GET_DEV(accel_dev),
+			"Shutdown message received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		vf_info->init = false;
+		}
+		break;
+	default:
+		goto err;
+	}
+
+	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+
+	/* re-enable interrupt on PF from this VF */
+	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+	return;
+err:
+	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
+		vf_nr + 1, msg);
+}
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_vf_info *vf;
+	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+		(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
+	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to send restarting msg to VF%d\n", i);
+	}
+}
+
+static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 msg = 0;
+	int ret;
+
+	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+
+	/* Send request from VF to PF */
+	ret = adf_iov_putmsg(accel_dev, msg, 0);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Compatibility Version Request.\n");
+		return ret;
+	}
+
+	/* Wait for response */
+	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+					 timeout)) {
+		dev_err(&GET_DEV(accel_dev),
+			"IOV request/response message timeout expired\n");
+		return -EIO;
+	}
+
+	/* Response from PF received, check compatibility */
+	switch (accel_dev->vf.compatible) {
+	case ADF_PF2VF_VF_COMPATIBLE:
+		break;
+	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+		/* VF is newer than PF and decides whether it is compatible */
+		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+			break;
+		/* fall through */
+	case ADF_PF2VF_VF_INCOMPATIBLE:
+		dev_err(&GET_DEV(accel_dev),
+			"PF (vers %d) and VF (vers %d) are not compatible\n",
+			accel_dev->vf.pf_version,
+			ADF_PFVF_COMPATIBILITY_VERSION);
+		return -EINVAL;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"Invalid response from PF; assume not compatible\n");
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	adf_enable_pf2vf_interrupts(accel_dev);
+	return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
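
adf_pf2vf_notify_restarting() above already shows the PF-side send pattern; for a single VF it reduces to composing a message from the adf_pf2vf_msg.h fields and letting adf_iov_putmsg() run the in-use handshake, the ACK wait and the retries. A minimal sketch with a hypothetical wrapper name:

    /* Hypothetical wrapper: tell one VF the device is restarting. */
    static int my_notify_vf_restarting(struct adf_accel_dev *accel_dev,
    				   u8 vf_nr)
    {
    	u32 msg = ADF_PF2VF_MSGORIGIN_SYSTEM |
    		  (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT);

    	return adf_iov_putmsg(accel_dev, msg, vf_nr);
    }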

+ 146 - 0
drivers/crypto/qat/qat_common/adf_pf2vf_msg.h

@@ -0,0 +1,146 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF.  The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register is functionally split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of register (bit 16) gets set an interrupt will be triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and to implement a collision control mechanism that prevents concurrent
+ * use of the PF2VF register by both the PF and VF.
+ *
+ *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   VF2PF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   PF2VF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
+ */
+
+#define ADF_PFVF_COMPATIBILITY_VERSION		0x1	/* PF<->VF compat */
+
+/* PF->VF messages */
+#define ADF_PF2VF_INT				BIT(0)
+#define ADF_PF2VF_MSGORIGIN_SYSTEM		BIT(1)
+#define ADF_PF2VF_MSGTYPE_MASK			0x0000003C
+#define ADF_PF2VF_MSGTYPE_SHIFT			2
+#define ADF_PF2VF_MSGTYPE_RESTARTING		0x01
+#define ADF_PF2VF_MSGTYPE_VERSION_RESP		0x02
+#define ADF_PF2VF_IN_USE_BY_PF			0x6AC20000
+#define ADF_PF2VF_IN_USE_BY_PF_MASK		0xFFFE0000
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK	0x00003FC0
+#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT	6
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK	0x0000C000
+#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT	14
+#define ADF_PF2VF_MINORVERSION_SHIFT		6
+#define ADF_PF2VF_MAJORVERSION_SHIFT		10
+#define ADF_PF2VF_VF_COMPATIBLE			1
+#define ADF_PF2VF_VF_INCOMPATIBLE		2
+#define ADF_PF2VF_VF_COMPAT_UNKNOWN		3
+
+/* VF->PF messages */
+#define ADF_VF2PF_IN_USE_BY_VF			0x00006AC2
+#define ADF_VF2PF_IN_USE_BY_VF_MASK		0x0000FFFE
+#define ADF_VF2PF_INT				BIT(16)
+#define ADF_VF2PF_MSGORIGIN_SYSTEM		BIT(17)
+#define ADF_VF2PF_MSGTYPE_MASK			0x003C0000
+#define ADF_VF2PF_MSGTYPE_SHIFT			18
+#define ADF_VF2PF_MSGTYPE_INIT			0x3
+#define ADF_VF2PF_MSGTYPE_SHUTDOWN		0x4
+#define ADF_VF2PF_MSGTYPE_VERSION_REQ		0x5
+#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ	0x6
+
+/* VF->PF Compatible Version Request */
+#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT		22
+
+/* Collision detection */
+#define ADF_IOV_MSG_COLLISION_DETECT_DELAY	10
+#define ADF_IOV_MSG_ACK_DELAY			2
+#define ADF_IOV_MSG_ACK_MAX_RETRY		100
+#define ADF_IOV_MSG_RETRY_DELAY			5
+#define ADF_IOV_MSG_MAX_RETRIES			3
+#define ADF_IOV_MSG_RESP_TIMEOUT	(ADF_IOV_MSG_ACK_DELAY * \
+					 ADF_IOV_MSG_ACK_MAX_RETRY + \
+					 ADF_IOV_MSG_COLLISION_DETECT_DELAY)
+#endif /* ADF_PF2VF_MSG_H */
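
To see the layout in action, the Compatibility Version Request built by adf_vf2pf_request_version() decodes as sketched below; and with the delay constants above, ADF_IOV_MSG_RESP_TIMEOUT works out to 2 ms * 100 + 10 ms = 210 ms.

    /* Worked example of the bit layout (values per this header) */
    u32 msg = ADF_VF2PF_MSGORIGIN_SYSTEM |			/* bit 17 */
    	  (ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ <<
    	   ADF_VF2PF_MSGTYPE_SHIFT) |				/* 0x6 << 18 */
    	  (ADF_PFVF_COMPATIBILITY_VERSION <<
    	   ADF_VF2PF_COMPAT_VER_REQ_SHIFT);			/* 0x1 << 22 */

    /* PF-side decode: */
    u8 type = (msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT;
    u8 vers = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;	/* == 0x1 */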

Too many files were changed in this diff, so some files are not shown.