Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "Algorithms:
   - Add RSA padding algorithm

  Drivers:
   - Add GCM mode support to atmel
   - Add atmel support for SAMA5D2 devices
   - Add cipher modes to talitos
   - Add rockchip driver for rk3288
   - Add qat support for C3XXX and C62X"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (103 commits)
  crypto: hifn_795x, picoxcell - use ablkcipher_request_cast
  crypto: qat - fix SKU definiftion for c3xxx dev
  crypto: qat - Fix random config build issue
  crypto: ccp - use to_pci_dev and to_platform_device
  crypto: qat - Rename dh895xcc mmp firmware
  crypto: 842 - remove WARN inside printk
  crypto: atmel-aes - add debug facilities to monitor register accesses.
  crypto: atmel-aes - add support to GCM mode
  crypto: atmel-aes - change the DMA threshold
  crypto: atmel-aes - fix the counter overflow in CTR mode
  crypto: atmel-aes - fix atmel-ctr-aes driver for RFC 3686
  crypto: atmel-aes - create sections to regroup functions by usage
  crypto: atmel-aes - fix typo and indentation
  crypto: atmel-aes - use SIZE_IN_WORDS() helper macro
  crypto: atmel-aes - improve performances of data transfer
  crypto: atmel-aes - fix atmel_aes_remove()
  crypto: atmel-aes - remove useless AES_FLAGS_DMA flag
  crypto: atmel-aes - reduce latency of DMA completion
  crypto: atmel-aes - remove unused 'err' member of struct atmel_aes_dev
  crypto: atmel-aes - rework crypto request completion
  ...
Linus Torvalds 9 years ago
Parent
Commit
c597b6bcd5
100 changed files with 7216 additions and 1570 deletions
  1. Documentation/devicetree/bindings/crypto/rockchip-crypto.txt (+29, -0)
  2. arch/powerpc/include/asm/icswx.h (+1, -0)
  3. arch/x86/crypto/ghash-clmulni-intel_glue.c (+26, -0)
  4. crypto/Makefile (+1, -0)
  5. crypto/akcipher.c (+33, -1)
  6. crypto/algapi.c (+4, -5)
  7. crypto/algif_aead.c (+3, -3)
  8. crypto/algif_skcipher.c (+5, -5)
  9. crypto/asymmetric_keys/signature.c (+1, -1)
  10. crypto/chacha20poly1305.c (+8, -0)
  11. crypto/cryptd.c (+2, -2)
  12. crypto/drbg.c (+3, -3)
  13. crypto/mcryptd.c (+2, -6)
  14. crypto/md5.c (+6, -0)
  15. crypto/rsa-pkcs1pad.c (+628, -0)
  16. crypto/rsa.c (+15, -25)
  17. crypto/sha1_generic.c (+7, -0)
  18. crypto/sha256_generic.c (+16, -0)
  19. crypto/tcrypt.c (+1, -1)
  20. drivers/char/hw_random/core.c (+5, -1)
  21. drivers/char/hw_random/omap3-rom-rng.c (+7, -6)
  22. drivers/crypto/Kconfig (+16, -2)
  23. drivers/crypto/Makefile (+1, -0)
  24. drivers/crypto/amcc/crypto4xx_core.c (+4, -0)
  25. drivers/crypto/atmel-aes-regs.h (+10, -0)
  26. drivers/crypto/atmel-aes.c (+1267, -576)
  27. drivers/crypto/atmel-sha.c (+1, -2)
  28. drivers/crypto/caam/caamhash.c (+25, -1)
  29. drivers/crypto/ccp/Kconfig (+2, -0)
  30. drivers/crypto/ccp/ccp-ops.c (+8, -31)
  31. drivers/crypto/ccp/ccp-pci.c (+4, -4)
  32. drivers/crypto/ccp/ccp-platform.c (+2, -4)
  33. drivers/crypto/hifn_795x.c (+207, -305)
  34. drivers/crypto/ixp4xx_crypto.c (+2, -4)
  35. drivers/crypto/marvell/cipher.c (+8, -0)
  36. drivers/crypto/marvell/hash.c (+4, -0)
  37. drivers/crypto/n2_core.c (+13, -37)
  38. drivers/crypto/nx/nx-842-powernv.c (+12, -11)
  39. drivers/crypto/omap-aes.c (+1, -3)
  40. drivers/crypto/omap-des.c (+2, -3)
  41. drivers/crypto/padlock-aes.c (+2, -2)
  42. drivers/crypto/picoxcell_crypto.c (+38, -18)
  43. drivers/crypto/qat/Kconfig (+46, -0)
  44. drivers/crypto/qat/Makefile (+4, -0)
  45. drivers/crypto/qat/qat_c3xxx/Makefile (+3, -0)
  46. drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c (+238, -0)
  47. drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h (+83, -0)
  48. drivers/crypto/qat/qat_c3xxx/adf_drv.c (+335, -0)
  49. drivers/crypto/qat/qat_c3xxxvf/Makefile (+3, -0)
  50. drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c (+173, -0)
  51. drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h (+19, -12)
  52. drivers/crypto/qat/qat_c3xxxvf/adf_drv.c (+305, -0)
  53. drivers/crypto/qat/qat_c62x/Makefile (+3, -0)
  54. drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c (+248, -0)
  55. drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h (+84, -0)
  56. drivers/crypto/qat/qat_c62x/adf_drv.c (+335, -0)
  57. drivers/crypto/qat/qat_c62xvf/Makefile (+3, -0)
  58. drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c (+173, -0)
  59. drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h (+19, -13)
  60. drivers/crypto/qat/qat_c62xvf/adf_drv.c (+305, -0)
  61. drivers/crypto/qat/qat_common/Makefile (+3, -1)
  62. drivers/crypto/qat/qat_common/adf_accel_devices.h (+14, -2)
  63. drivers/crypto/qat/qat_common/adf_accel_engine.c (+6, -3)
  64. drivers/crypto/qat/qat_common/adf_admin.c (+3, -1)
  65. drivers/crypto/qat/qat_common/adf_aer.c (+2, -2)
  66. drivers/crypto/qat/qat_common/adf_cfg_common.h (+6, -2)
  67. drivers/crypto/qat/qat_common/adf_common_drv.h (+24, -7)
  68. drivers/crypto/qat/qat_common/adf_ctl_drv.c (+6, -13)
  69. drivers/crypto/qat/qat_common/adf_dev_mgr.c (+31, -5)
  70. drivers/crypto/qat/qat_common/adf_hw_arbiter.c (+1, -7)
  71. drivers/crypto/qat/qat_common/adf_init.c (+1, -20)
  72. drivers/crypto/qat/qat_common/adf_isr.c (+30, -14)
  73. drivers/crypto/qat/qat_common/adf_pf2vf_msg.c (+0, -23)
  74. drivers/crypto/qat/qat_common/adf_transport.c (+8, -20)
  75. drivers/crypto/qat/qat_common/adf_transport_access_macros.h (+5, -0)
  76. drivers/crypto/qat/qat_common/adf_transport_internal.h (+1, -1)
  77. drivers/crypto/qat/qat_common/adf_vf_isr.c (+43, -21)
  78. drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h (+10, -0)
  79. drivers/crypto/qat/qat_common/icp_qat_hal.h (+34, -3)
  80. drivers/crypto/qat/qat_common/icp_qat_uclo.h (+158, -7)
  81. drivers/crypto/qat/qat_common/qat_crypto.c (+108, -28)
  82. drivers/crypto/qat/qat_common/qat_hal.c (+81, -43)
  83. drivers/crypto/qat/qat_common/qat_uclo.c (+529, -26)
  84. drivers/crypto/qat/qat_dh895xcc/Makefile (+1, -3)
  85. drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c (+2, -3)
  86. drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h (+4, -5)
  87. drivers/crypto/qat/qat_dh895xcc/adf_drv.c (+6, -97)
  88. drivers/crypto/qat/qat_dh895xccvf/Makefile (+1, -3)
  89. drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c (+3, -2)
  90. drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h (+3, -7)
  91. drivers/crypto/qat/qat_dh895xccvf/adf_drv.c (+4, -92)
  92. drivers/crypto/qce/ablkcipher.c (+8, -0)
  93. drivers/crypto/qce/sha.c (+5, -0)
  94. drivers/crypto/rockchip/Makefile (+3, -0)
  95. drivers/crypto/rockchip/rk3288_crypto.c (+394, -0)
  96. drivers/crypto/rockchip/rk3288_crypto.h (+216, -0)
  97. drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c (+505, -0)
  98. drivers/crypto/sahara.c (+27, -15)
  99. drivers/crypto/sunxi-ss/sun4i-ss-core.c (+2, -0)
  100. drivers/crypto/talitos.c (+117, -7)

+ 29 - 0
Documentation/devicetree/bindings/crypto/rockchip-crypto.txt

@@ -0,0 +1,29 @@
+Rockchip Electronics And Security Accelerator
+
+Required properties:
+- compatible: Should be "rockchip,rk3288-crypto"
+- reg: Base physical address of the engine and length of memory mapped
+       region
+- interrupts: Interrupt number
+- clocks: Reference to the clocks about crypto
+- clock-names: "aclk" used to clock data
+	       "hclk" used to clock data
+	       "sclk" used to clock crypto accelerator
+	       "apb_pclk" used to clock dma
+- resets: Must contain an entry for each entry in reset-names.
+	  See ../reset/reset.txt for details.
+- reset-names: Must include the name "crypto-rst".
+
+Examples:
+
+	crypto: cypto-controller@ff8a0000 {
+		compatible = "rockchip,rk3288-crypto";
+		reg = <0xff8a0000 0x4000>;
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
+			 <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
+		clock-names = "aclk", "hclk", "sclk", "apb_pclk";
+		resets = <&cru SRST_CRYPTO>;
+		reset-names = "crypto-rst";
+		status = "okay";
+	};

+ 1 - 0
arch/powerpc/include/asm/icswx.h

@@ -164,6 +164,7 @@ struct coprocessor_request_block {
 #define ICSWX_INITIATED		(0x8)
 #define ICSWX_BUSY		(0x4)
 #define ICSWX_REJECTED		(0x2)
+#define ICSWX_XERS0		(0x1)	/* undefined or set from XERSO. */

 static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
 {

+ 26 - 0
arch/x86/crypto/ghash-clmulni-intel_glue.c

@@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req)
 	}
 }

+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	ghash_async_init(req);
+	memcpy(dctx, in, sizeof(*dctx));
+	return 0;
+
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	memcpy(out, dctx, sizeof(*dctx));
+	return 0;
+
+}
+
 static int ghash_async_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = {
 	.final		= ghash_async_final,
 	.setkey		= ghash_async_setkey,
 	.digest		= ghash_async_digest,
+	.export		= ghash_async_export,
+	.import		= ghash_async_import,
 	.halg = {
 		.digestsize	= GHASH_DIGEST_SIZE,
+		.statesize = sizeof(struct ghash_desc_ctx),
 		.base = {
 			.cra_name		= "ghash",
 			.cra_driver_name	= "ghash-clmulni",

+ 1 - 0
crypto/Makefile

@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
 rsa_generic-y += rsaprivkey-asn1.o
 rsa_generic-y += rsa.o
 rsa_generic-y += rsa_helper.o
+rsa_generic-y += rsa-pkcs1pad.o
 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o

 cryptomgr-y := algboss.o testmgr.o

+ 33 - 1
crypto/akcipher.c

@@ -21,6 +21,7 @@
 #include <linux/cryptouser.h>
 #include <net/netlink.h>
 #include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
 #include "internal.h"

 #ifdef CONFIG_NET
@@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
 	return 0;
 }

+static void crypto_akcipher_free_instance(struct crypto_instance *inst)
+{
+	struct akcipher_instance *akcipher = akcipher_instance(inst);
+
+	akcipher->free(akcipher);
+}
+
 static const struct crypto_type crypto_akcipher_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_akcipher_init_tfm,
+	.free = crypto_akcipher_free_instance,
 #ifdef CONFIG_PROC_FS
 	.show = crypto_akcipher_show,
 #endif
@@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = {
 	.tfmsize = offsetof(struct crypto_akcipher, base),
 };

+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
+			 u32 type, u32 mask)
+{
+	spawn->base.frontend = &crypto_akcipher_type;
+	return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
+
 struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
 					      u32 mask)
 {
@@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);

-int crypto_register_akcipher(struct akcipher_alg *alg)
+static void akcipher_prepare_alg(struct akcipher_alg *alg)
 {
 	struct crypto_alg *base = &alg->base;

 	base->cra_type = &crypto_akcipher_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+}
+
+int crypto_register_akcipher(struct akcipher_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	akcipher_prepare_alg(alg);
 	return crypto_register_alg(base);
 }
 EXPORT_SYMBOL_GPL(crypto_register_akcipher);
@@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);

+int akcipher_register_instance(struct crypto_template *tmpl,
+			       struct akcipher_instance *inst)
+{
+	akcipher_prepare_alg(&inst->alg);
+	return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(akcipher_register_instance);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Generic public key cipher type");

+ 4 - 5
crypto/algapi.c

@@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
 {
 	struct crypto_spawn *spawn, *n;

-	if (list_empty(stack))
+	spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
+	if (!spawn)
 		return NULL;

-	spawn = list_first_entry(stack, struct crypto_spawn, list);
-	n = list_entry(spawn->list.next, struct crypto_spawn, list);
+	n = list_next_entry(spawn, list);

 	if (spawn->alg && &n->list != stack && !n->alg)
 		n->alg = (n->list.next == stack) ? alg :
-			 &list_entry(n->list.next, struct crypto_spawn,
-				     list)->inst->alg;
+			 &list_next_entry(n, list)->inst->alg;

 	list_move(&spawn->list, secondary_spawns);


+ 3 - 3
crypto/algif_aead.c

@@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	}

 	while (size) {
-		unsigned long len = size;
+		size_t len = size;
 		struct scatterlist *sg = NULL;

 		/* use the existing memory in an allocated page */
@@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 		/* allocate a new page */
 		len = min_t(unsigned long, size, aead_sndbuf(sk));
 		while (len) {
-			int plen = 0;
+			size_t plen = 0;

 			if (sgl->cur >= ALG_MAX_PAGES) {
 				aead_put_sgl(sk);
@@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 			}

 			sg = sgl->sg + sgl->cur;
-			plen = min_t(int, len, PAGE_SIZE);
+			plen = min_t(size_t, len, PAGE_SIZE);

 			sg_assign_page(sg, alloc_page(GFP_KERNEL));
 			err = -ENOMEM;

+ 5 - 5
crypto/algif_skcipher.c

@@ -40,7 +40,7 @@ struct skcipher_ctx {
 	struct af_alg_completion completion;

 	atomic_t inflight;
-	unsigned used;
+	size_t used;

 	unsigned int len;
 	bool more;
@@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
 	return 0;
 }

-static void skcipher_pull_sgl(struct sock *sk, int used, int put)
+static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
@@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, int used, int put)
 		sg = sgl->sg;

 		for (i = 0; i < sgl->cur; i++) {
-			int plen = min_t(int, used, sg[i].length);
+			size_t plen = min_t(size_t, used, sg[i].length);

 			if (!sg_page(sg + i))
 				continue;
@@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 	while (size) {
 		struct scatterlist *sg;
 		unsigned long len = size;
-		int plen;
+		size_t plen;

 		if (ctx->merge) {
 			sgl = list_entry(ctx->tsgl.prev,
@@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 		sg_unmark_end(sg + sgl->cur);
 		do {
 			i = sgl->cur;
-			plen = min_t(int, len, PAGE_SIZE);
+			plen = min_t(size_t, len, PAGE_SIZE);

 			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
 			err = -ENOMEM;

+ 1 - 1
crypto/asymmetric_keys/signature.c

@@ -13,7 +13,7 @@

 #define pr_fmt(fmt) "SIG: "fmt
 #include <keys/asymmetric-subtype.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/err.h>
 #include <crypto/public_key.h>
 #include "asymmetric_keys.h"

+ 8 - 0
crypto/chacha20poly1305.c

@@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req)
 	struct scatterlist *src, *dst;
 	int err;

+	if (rctx->cryptlen == 0)
+		goto skip;
+
 	chacha_iv(creq->iv, req, 1);

 	sg_init_table(rctx->src, 2);
@@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req)
 	if (err)
 		return err;

+skip:
 	return poly_verify_tag(req);
 }

@@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req)
 	struct scatterlist *src, *dst;
 	int err;

+	if (req->cryptlen == 0)
+		goto skip;
+
 	chacha_iv(creq->iv, req, 1);

 	sg_init_table(rctx->src, 2);
@@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req)
 	if (err)
 		return err;

+skip:
 	return poly_genkey(req);
 }


+ 2 - 2
crypto/cryptd.c

@@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	inst->alg.halg.base.cra_flags = type;

 	inst->alg.halg.digestsize = salg->digestsize;
+	inst->alg.halg.statesize = salg->statesize;
 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

 	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@@ -887,8 +888,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 		return ERR_PTR(-EINVAL);
-	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+	type = crypto_skcipher_type(type);
 	mask &= ~CRYPTO_ALG_TYPE_MASK;
 	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
 	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);

+ 3 - 3
crypto/drbg.c

@@ -626,7 +626,7 @@ out:
 	return len;
 }

-static struct drbg_state_ops drbg_ctr_ops = {
+static const struct drbg_state_ops drbg_ctr_ops = {
 	.update		= drbg_ctr_update,
 	.generate	= drbg_ctr_generate,
 	.crypto_init	= drbg_init_sym_kernel,
@@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
 	return len;
 }

-static struct drbg_state_ops drbg_hmac_ops = {
+static const struct drbg_state_ops drbg_hmac_ops = {
 	.update		= drbg_hmac_update,
 	.generate	= drbg_hmac_generate,
 	.crypto_init	= drbg_init_hash_kernel,
@@ -1032,7 +1032,7 @@ out:
  * scratchpad usage: as update and generate are used isolated, both
  * can use the scratchpad
  */
-static struct drbg_state_ops drbg_hash_ops = {
+static const struct drbg_state_ops drbg_hash_ops = {
 	.update		= drbg_hash_update,
 	.generate	= drbg_hash_generate,
 	.crypto_init	= drbg_init_hash_kernel,

+ 2 - 6
crypto/mcryptd.c

@@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void)
 	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
 	while (single_task_running()) {
 		mutex_lock(&flist->lock);
-		if (list_empty(&flist->list)) {
-			mutex_unlock(&flist->lock);
-			return;
-		}
-		cstate = list_entry(flist->list.next,
+		cstate = list_first_entry_or_null(&flist->list,
 				struct mcryptd_alg_cstate, flush_list);
-		if (!cstate->flusher_engaged) {
+		if (!cstate || !cstate->flusher_engaged) {
 			mutex_unlock(&flist->lock);
 			return;
 		}

+ 6 - 0
crypto/md5.c

@@ -24,6 +24,12 @@
 #include <linux/cryptohash.h>
 #include <asm/byteorder.h>

+const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
+	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+EXPORT_SYMBOL_GPL(md5_zero_message_hash);
+
 /* XXX: this stuff can be optimized */
 static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
 {

+ 628 - 0
crypto/rsa-pkcs1pad.c

@@ -0,0 +1,628 @@
+/*
+ * RSA padding templates.
+ *
+ * Copyright (c) 2015  Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+struct pkcs1pad_ctx {
+	struct crypto_akcipher *child;
+
+	unsigned int key_size;
+};
+
+struct pkcs1pad_request {
+	struct akcipher_request child_req;
+
+	struct scatterlist in_sg[3], out_sg[2];
+	uint8_t *in_buf, *out_buf;
+};
+
+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+		unsigned int keylen)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	int err, size;
+
+	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+
+	if (!err) {
+		/* Find out new modulus size from rsa implementation */
+		size = crypto_akcipher_maxsize(ctx->child);
+
+		ctx->key_size = size > 0 ? size : 0;
+		if (size <= 0)
+			err = size;
+	}
+
+	return err;
+}
+
+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+		unsigned int keylen)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	int err, size;
+
+	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+
+	if (!err) {
+		/* Find out new modulus size from rsa implementation */
+		size = crypto_akcipher_maxsize(ctx->child);
+
+		ctx->key_size = size > 0 ? size : 0;
+		if (size <= 0)
+			err = size;
+	}
+
+	return err;
+}
+
+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	/*
+	 * The maximum destination buffer size for the encrypt/sign operations
+	 * will be the same as for RSA, even though it's smaller for
+	 * decrypt/verify.
+	 */
+
+	return ctx->key_size ?: -EINVAL;
+}
+
+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
+		struct scatterlist *next)
+{
+	int nsegs = next ? 1 : 0;
+
+	if (offset_in_page(buf) + len <= PAGE_SIZE) {
+		nsegs += 1;
+		sg_init_table(sg, nsegs);
+		sg_set_buf(sg, buf, len);
+	} else {
+		nsegs += 2;
+		sg_init_table(sg, nsegs);
+		sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
+		sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
+				offset_in_page(buf) + len - PAGE_SIZE);
+	}
+
+	if (next)
+		sg_chain(sg, nsegs, next);
+}
+
+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
+	size_t chunk_len, pad_left;
+	struct sg_mapping_iter miter;
+
+	if (!err) {
+		if (pad_len) {
+			sg_miter_start(&miter, req->dst,
+					sg_nents_for_len(req->dst, pad_len),
+					SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+			pad_left = pad_len;
+			while (pad_left) {
+				sg_miter_next(&miter);
+
+				chunk_len = min(miter.length, pad_left);
+				memset(miter.addr, 0, chunk_len);
+				pad_left -= chunk_len;
+			}
+
+			sg_miter_stop(&miter);
+		}
+
+		sg_pcopy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, ctx->key_size),
+				req_ctx->out_buf, req_ctx->child_req.dst_len,
+				pad_len);
+	}
+	req->dst_len = ctx->key_size;
+
+	kfree(req_ctx->in_buf);
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_encrypt_sign_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req,
+			pkcs1pad_encrypt_sign_complete(req, err));
+}
+
+static int pkcs1pad_encrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+	unsigned int i, ps_end;
+
+	if (!ctx->key_size)
+		return -EINVAL;
+
+	if (req->src_len > ctx->key_size - 11)
+		return -EOVERFLOW;
+
+	if (req->dst_len < ctx->key_size) {
+		req->dst_len = ctx->key_size;
+		return -EOVERFLOW;
+	}
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/*
+	 * Replace both input and output to add the padding in the input and
+	 * the potential missing leading zeros in the output.
+	 */
+	req_ctx->child_req.src = req_ctx->in_sg;
+	req_ctx->child_req.src_len = ctx->key_size - 1;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size;
+
+	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->in_buf)
+		return -ENOMEM;
+
+	ps_end = ctx->key_size - req->src_len - 2;
+	req_ctx->in_buf[0] = 0x02;
+	for (i = 1; i < ps_end; i++)
+		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
+	req_ctx->in_buf[ps_end] = 0x00;
+
+	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+			ctx->key_size - 1 - req->src_len, req->src);
+
+	req_ctx->out_buf = kmalloc(ctx->key_size,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf) {
+		kfree(req_ctx->in_buf);
+		return -ENOMEM;
+	}
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_encrypt_sign_complete_cb, req);
+
+	err = crypto_akcipher_encrypt(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_encrypt_sign_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	unsigned int pos;
+
+	if (err == -EOVERFLOW)
+		/* Decrypted value had no leading 0 byte */
+		err = -EINVAL;
+
+	if (err)
+		goto done;
+
+	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (req_ctx->out_buf[0] != 0x02) {
+		err = -EINVAL;
+		goto done;
+	}
+	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+		if (req_ctx->out_buf[pos] == 0x00)
+			break;
+	if (pos < 9 || pos == req_ctx->child_req.dst_len) {
+		err = -EINVAL;
+		goto done;
+	}
+	pos++;
+
+	if (req->dst_len < req_ctx->child_req.dst_len - pos)
+		err = -EOVERFLOW;
+	req->dst_len = req_ctx->child_req.dst_len - pos;
+
+	if (!err)
+		sg_copy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, req->dst_len),
+				req_ctx->out_buf + pos, req->dst_len);
+
+done:
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_decrypt_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+}
+
+static int pkcs1pad_decrypt(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+
+	if (!ctx->key_size || req->src_len != ctx->key_size)
+		return -EINVAL;
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/* Reuse input buffer, output to a new buffer */
+	req_ctx->child_req.src = req->src;
+	req_ctx->child_req.src_len = req->src_len;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf)
+		return -ENOMEM;
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size - 1, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_decrypt_complete_cb, req);
+
+	err = crypto_akcipher_decrypt(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_decrypt_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_sign(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+	unsigned int ps_end;
+
+	if (!ctx->key_size)
+		return -EINVAL;
+
+	if (req->src_len > ctx->key_size - 11)
+		return -EOVERFLOW;
+
+	if (req->dst_len < ctx->key_size) {
+		req->dst_len = ctx->key_size;
+		return -EOVERFLOW;
+	}
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/*
+	 * Replace both input and output to add the padding in the input and
+	 * the potential missing leading zeros in the output.
+	 */
+	req_ctx->child_req.src = req_ctx->in_sg;
+	req_ctx->child_req.src_len = ctx->key_size - 1;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size;
+
+	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->in_buf)
+		return -ENOMEM;
+
+	ps_end = ctx->key_size - req->src_len - 2;
+	req_ctx->in_buf[0] = 0x01;
+	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
+	req_ctx->in_buf[ps_end] = 0x00;
+
+	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+			ctx->key_size - 1 - req->src_len, req->src);
+
+	req_ctx->out_buf = kmalloc(ctx->key_size,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf) {
+		kfree(req_ctx->in_buf);
+		return -ENOMEM;
+	}
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_encrypt_sign_complete_cb, req);
+
+	err = crypto_akcipher_sign(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_encrypt_sign_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	unsigned int pos;
+
+	if (err == -EOVERFLOW)
+		/* Decrypted value had no leading 0 byte */
+		err = -EINVAL;
+
+	if (err)
+		goto done;
+
+	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (req_ctx->out_buf[0] != 0x01) {
+		err = -EINVAL;
+		goto done;
+	}
+	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+		if (req_ctx->out_buf[pos] != 0xff)
+			break;
+	if (pos < 9 || pos == req_ctx->child_req.dst_len ||
+			req_ctx->out_buf[pos] != 0x00) {
+		err = -EINVAL;
+		goto done;
+	}
+	pos++;
+
+	if (req->dst_len < req_ctx->child_req.dst_len - pos)
+		err = -EOVERFLOW;
+	req->dst_len = req_ctx->child_req.dst_len - pos;
+
+	if (!err)
+		sg_copy_from_buffer(req->dst,
+				sg_nents_for_len(req->dst, req->dst_len),
+				req_ctx->out_buf + pos, req->dst_len);
+
+done:
+	kzfree(req_ctx->out_buf);
+
+	return err;
+}
+
+static void pkcs1pad_verify_complete_cb(
+		struct crypto_async_request *child_async_req, int err)
+{
+	struct akcipher_request *req = child_async_req->data;
+	struct crypto_async_request async_req;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	async_req.data = req->base.data;
+	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+	async_req.flags = child_async_req->flags;
+	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
+}
+
+/*
+ * The verify operation is here for completeness similar to the verification
+ * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
+ * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
+ * retrieve the DigestInfo from a signature, instead the user is expected
+ * to call the sign operation to generate the expected signature and compare
+ * signatures instead of the message-digests.
+ */
+static int pkcs1pad_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	int err;
+
+	if (!ctx->key_size || req->src_len != ctx->key_size)
+		return -EINVAL;
+
+	if (ctx->key_size > PAGE_SIZE)
+		return -ENOTSUPP;
+
+	/* Reuse input buffer, output to a new buffer */
+	req_ctx->child_req.src = req->src;
+	req_ctx->child_req.src_len = req->src_len;
+	req_ctx->child_req.dst = req_ctx->out_sg;
+	req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC);
+	if (!req_ctx->out_buf)
+		return -ENOMEM;
+
+	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+			ctx->key_size - 1, NULL);
+
+	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+			pkcs1pad_verify_complete_cb, req);
+
+	err = crypto_akcipher_verify(&req_ctx->child_req);
+	if (err != -EINPROGRESS &&
+			(err != -EBUSY ||
+			 !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return pkcs1pad_verify_complete(req, err);
+
+	return err;
+}
+
+static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct crypto_akcipher *child_tfm;
+
+	child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
+	if (IS_ERR(child_tfm))
+		return PTR_ERR(child_tfm);
+
+	ctx->child = child_tfm;
+
+	return 0;
+}
+
+static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	crypto_free_akcipher(ctx->child);
+}
+
+static void pkcs1pad_free(struct akcipher_instance *inst)
+{
+	struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);
+
+	crypto_drop_akcipher(spawn);
+
+	kfree(inst);
+}
+
+static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct crypto_attr_type *algt;
+	struct akcipher_instance *inst;
+	struct crypto_akcipher_spawn *spawn;
+	struct akcipher_alg *rsa_alg;
+	const char *rsa_alg_name;
+	int err;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
+		return -EINVAL;
+
+	rsa_alg_name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(rsa_alg_name))
+		return PTR_ERR(rsa_alg_name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	spawn = akcipher_instance_ctx(inst);
+	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
+	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
+			crypto_requires_sync(algt->type, algt->mask));
+	if (err)
+		goto out_free_inst;
+
+	rsa_alg = crypto_spawn_akcipher_alg(spawn);
+
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name,
+				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+				rsa_alg->base.cra_name) >=
+			CRYPTO_MAX_ALG_NAME ||
+			snprintf(inst->alg.base.cra_driver_name,
+				CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+				rsa_alg->base.cra_driver_name) >=
+			CRYPTO_MAX_ALG_NAME)
+		goto out_drop_alg;
+
+	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
+	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
+
+	inst->alg.init = pkcs1pad_init_tfm;
+	inst->alg.exit = pkcs1pad_exit_tfm;
+
+	inst->alg.encrypt = pkcs1pad_encrypt;
+	inst->alg.decrypt = pkcs1pad_decrypt;
+	inst->alg.sign = pkcs1pad_sign;
+	inst->alg.verify = pkcs1pad_verify;
+	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
+	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
+	inst->alg.max_size = pkcs1pad_get_max_size;
+	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
+
+	inst->free = pkcs1pad_free;
+
+	err = akcipher_register_instance(tmpl, inst);
+	if (err)
+		goto out_drop_alg;
+
+	return 0;
+
+out_drop_alg:
+	crypto_drop_akcipher(spawn);
+out_free_inst:
+	kfree(inst);
+	return err;
+}
+
+struct crypto_template rsa_pkcs1pad_tmpl = {
+	.name = "pkcs1pad",
+	.create = pkcs1pad_create,
+	.module = THIS_MODULE,
+};
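
The template registered above can be composed around any RSA akcipher by name. The following is a minimal usage sketch, not part of the patch set itself: a kernel caller instantiating "pkcs1pad(rsa)" through the akcipher API. The key format, buffer sizing and error handling are simplified assumptions; async completion (-EINPROGRESS/-EBUSY) handling is omitted.

/* Illustrative sketch only -- not from this commit. */
#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

static int pkcs1pad_encrypt_example(const void *pub_key, unsigned int keylen,
				    const u8 *msg, unsigned int msg_len,
				    u8 *out, unsigned int out_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	int err;

	/* Instantiate the padding template around the generic rsa akcipher. */
	tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* pub_key is assumed to be in the format the rsa driver expects. */
	err = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
	if (err)
		goto out_free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* out must be at least crypto_akcipher_maxsize(tfm) bytes long. */
	sg_init_one(&src, msg, msg_len);
	sg_init_one(&dst, out, out_len);
	akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);

	/* Synchronous call for simplicity; a real user would set a callback. */
	err = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}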

+ 15 - 25
crypto/rsa.c

@@ -13,6 +13,7 @@
 #include <crypto/internal/rsa.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/akcipher.h>
+#include <crypto/algapi.h>

 /*
  * RSAEP function [RFC3447 sec 5.1.1]
@@ -91,12 +92,6 @@ static int rsa_enc(struct akcipher_request *req)
 		goto err_free_c;
 	}

-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_c;
-	}
-
 	ret = -ENOMEM;
 	m = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!m)
@@ -136,12 +131,6 @@ static int rsa_dec(struct akcipher_request *req)
 		goto err_free_m;
 	}

-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_m;
-	}
-
 	ret = -ENOMEM;
 	c = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!c)
@@ -180,12 +169,6 @@ static int rsa_sign(struct akcipher_request *req)
 		goto err_free_s;
 	}

-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_s;
-	}
-
 	ret = -ENOMEM;
 	m = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!m)
@@ -225,12 +208,6 @@ static int rsa_verify(struct akcipher_request *req)
 		goto err_free_m;
 	}

-	if (req->dst_len < mpi_get_size(pkey->n)) {
-		req->dst_len = mpi_get_size(pkey->n);
-		ret = -EOVERFLOW;
-		goto err_free_m;
-	}
-
 	ret = -ENOMEM;
 	s = mpi_read_raw_from_sgl(req->src, req->src_len);
 	if (!s) {
@@ -339,11 +316,24 @@ static struct akcipher_alg rsa = {

 static int rsa_init(void)
 {
-	return crypto_register_akcipher(&rsa);
+	int err;
+
+	err = crypto_register_akcipher(&rsa);
+	if (err)
+		return err;
+
+	err = crypto_register_template(&rsa_pkcs1pad_tmpl);
+	if (err) {
+		crypto_unregister_akcipher(&rsa);
+		return err;
+	}
+
+	return 0;
 }

 static void rsa_exit(void)
 {
+	crypto_unregister_template(&rsa_pkcs1pad_tmpl);
 	crypto_unregister_akcipher(&rsa);
 }


+ 7 - 0
crypto/sha1_generic.c

@@ -26,6 +26,13 @@
 #include <crypto/sha1_base.h>
 #include <asm/byteorder.h>

+const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
+	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+	0xaf, 0xd8, 0x07, 0x09
+};
+EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
+
 static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
 				  int blocks)
 {

+ 16 - 0
crypto/sha256_generic.c

@@ -27,6 +27,22 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>

+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+	0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
 static inline u32 Ch(u32 x, u32 y, u32 z)
 {
 	return z ^ (x & (y ^ z));

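The md5/sha1/sha224/sha256 zero-message digests exported in the hunks above let drivers answer zero-length hash requests without touching the hardware. A hedged sketch of that pattern follows; the "my_drv_*" names are made up for illustration and are not part of this series.

/* Illustrative sketch only -- not from this commit. */
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

static int my_drv_final(struct ahash_request *req)
{
	unsigned int ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));

	if (my_drv_req_is_empty(req)) {	/* no data was ever queued */
		switch (ds) {
		case MD5_DIGEST_SIZE:
			memcpy(req->result, md5_zero_message_hash, ds);
			return 0;
		case SHA1_DIGEST_SIZE:
			memcpy(req->result, sha1_zero_message_hash, ds);
			return 0;
		case SHA256_DIGEST_SIZE:
			memcpy(req->result, sha256_zero_message_hash, ds);
			return 0;
		}
	}
	return my_drv_hw_final(req);	/* fall back to the engine */
}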
+ 1 - 1
crypto/tcrypt.c

@@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
 				NULL, 0, 16, 16, aead_speed_template_20);
 		test_aead_speed("gcm(aes)", ENCRYPT, sec,
-				NULL, 0, 16, 8, aead_speed_template_20);
+				NULL, 0, 16, 8, speed_template_16_24_32);
 		break;

 	case 212:

+ 5 - 1
drivers/char/hw_random/core.c

@@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 			goto out;
 		}

-		mutex_lock(&reading_mutex);
+		if (mutex_lock_interruptible(&reading_mutex)) {
+			err = -ERESTARTSYS;
+			goto out_put;
+		}
 		if (!data_avail) {
 			bytes_read = rng_get_data(rng, rng_buffer,
 				rng_buffer_size(),
@@ -288,6 +291,7 @@ out:

 out_unlock_reading:
 	mutex_unlock(&reading_mutex);
+out_put:
 	put_rng(rng);
 	goto out;
 }

+ 7 - 6
drivers/char/hw_random/omap3-rom-rng.c

@@ -17,7 +17,7 @@
 #include <linux/init.h>
 #include <linux/random.h>
 #include <linux/hw_random.h>
-#include <linux/timer.h>
+#include <linux/workqueue.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
@@ -29,11 +29,11 @@
 /* param1: ptr, param2: count, param3: flag */
 static u32 (*omap3_rom_rng_call)(u32, u32, u32);

-static struct timer_list idle_timer;
+static struct delayed_work idle_work;
 static int rng_idle;
 static struct clk *rng_clk;

-static void omap3_rom_rng_idle(unsigned long data)
+static void omap3_rom_rng_idle(struct work_struct *work)
 {
 	int r;

@@ -51,7 +51,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
 	u32 r;
 	u32 ptr;

-	del_timer_sync(&idle_timer);
+	cancel_delayed_work_sync(&idle_work);
 	if (rng_idle) {
 		clk_prepare_enable(rng_clk);
 		r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
@@ -65,7 +65,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)

 	ptr = virt_to_phys(buf);
 	r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
-	mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
+	schedule_delayed_work(&idle_work, msecs_to_jiffies(500));
 	if (r != 0)
 		return -EINVAL;
 	return 0;
@@ -102,7 +102,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}

-	setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
+	INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle);
 	rng_clk = devm_clk_get(&pdev->dev, "ick");
 	if (IS_ERR(rng_clk)) {
 		pr_err("unable to get RNG clock\n");
@@ -118,6 +118,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev)

 static int omap3_rom_rng_remove(struct platform_device *pdev)
 {
+	cancel_delayed_work_sync(&idle_work);
 	hwrng_unregister(&omap3_rom_rng_ops);
 	clk_disable_unprepare(rng_clk);
 	return 0;

+ 16 - 2
drivers/crypto/Kconfig

@@ -194,6 +194,9 @@ config CRYPTO_DEV_NIAGARA2
        select CRYPTO_DES
        select CRYPTO_BLKCIPHER
        select CRYPTO_HASH
+       select CRYPTO_MD5
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
        depends on SPARC64
        help
 	  Each core of a Niagara2 processor contains a Stream
@@ -378,10 +381,10 @@ config CRYPTO_DEV_BFIN_CRC

 config CRYPTO_DEV_ATMEL_AES
 	tristate "Support for Atmel AES hw accelerator"
-	depends on ARCH_AT91
+	depends on AT_XDMAC || AT_HDMAC || COMPILE_TEST
 	select CRYPTO_AES
+	select CRYPTO_AEAD
 	select CRYPTO_BLKCIPHER
-	select AT_HDMAC
 	help
 	  Some Atmel processors have AES hw accelerator.
 	  Select this if you want to use the Atmel module for
@@ -498,4 +501,15 @@ config CRYPTO_DEV_SUN4I_SS
 	  To compile this driver as a module, choose M here: the module
 	  will be called sun4i-ss.

+config CRYPTO_DEV_ROCKCHIP
+	tristate "Rockchip's Cryptographic Engine driver"
+	depends on OF && ARCH_ROCKCHIP
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER
+
+	help
+	  This driver interfaces with the hardware crypto accelerator.
+	  Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
+
 endif # CRYPTO_HW

+ 1 - 0
drivers/crypto/Makefile

@@ -29,3 +29,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
 obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
+obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/

+ 4 - 0
drivers/crypto/amcc/crypto4xx_core.c

@@ -781,6 +781,10 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,

 	/* figure how many gd is needed */
 	num_gd = sg_nents_for_len(src, datalen);
+	if ((int)num_gd < 0) {
+		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
+		return -EINVAL;
+	}
 	if (num_gd == 1)
 		num_gd = 0;


+ 10 - 0
drivers/crypto/atmel-aes-regs.h

@@ -9,6 +9,7 @@
 #define	AES_MR			0x04
 #define AES_MR_CYPHER_DEC		(0 << 0)
 #define AES_MR_CYPHER_ENC		(1 << 0)
+#define AES_MR_GTAGEN			(1 << 1)
 #define	AES_MR_DUALBUFF			(1 << 3)
 #define AES_MR_PROCDLY_MASK		(0xF << 4)
 #define AES_MR_PROCDLY_OFFSET	4
@@ -26,6 +27,7 @@
 #define AES_MR_OPMOD_OFB		(0x2 << 12)
 #define AES_MR_OPMOD_CFB		(0x3 << 12)
 #define AES_MR_OPMOD_CTR		(0x4 << 12)
+#define AES_MR_OPMOD_GCM		(0x5 << 12)
 #define AES_MR_LOD				(0x1 << 15)
 #define AES_MR_CFBS_MASK		(0x7 << 16)
 #define AES_MR_CFBS_128b		(0x0 << 16)
@@ -44,6 +46,7 @@
 #define	AES_ISR		0x1C
 #define AES_INT_DATARDY		(1 << 0)
 #define AES_INT_URAD		(1 << 8)
+#define AES_INT_TAGRDY		(1 << 16)
 #define AES_ISR_URAT_MASK	(0xF << 12)
 #define AES_ISR_URAT_IDR_WR_PROC	(0x0 << 12)
 #define AES_ISR_URAT_ODR_RD_PROC	(0x1 << 12)
@@ -57,6 +60,13 @@
 #define AES_ODATAR(x)	(0x50 + ((x) * 0x04))
 #define AES_IVR(x)		(0x60 + ((x) * 0x04))

+#define AES_AADLENR	0x70
+#define AES_CLENR	0x74
+#define AES_GHASHR(x)	(0x78 + ((x) * 0x04))
+#define AES_TAGR(x)	(0x88 + ((x) * 0x04))
+#define AES_CTRR	0x98
+#define AES_GCMHR(x)	(0x9c + ((x) * 0x04))
+
 #define AES_HW_VERSION	0xFC

 #endif /* __ATMEL_AES_REGS_H__ */

+ 1267 - 576
drivers/crypto/atmel-aes.c

@@ -33,68 +33,118 @@
 #include <linux/of_device.h>
 #include <linux/delay.h>
 #include <linux/crypto.h>
-#include <linux/cryptohash.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
-#include <crypto/hash.h>
-#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
 #include <linux/platform_data/crypto-atmel.h>
 #include <dt-bindings/dma/at91.h>
 #include "atmel-aes-regs.h"

+#define ATMEL_AES_PRIORITY	300
+
+#define ATMEL_AES_BUFFER_ORDER	2
+#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
+
 #define CFB8_BLOCK_SIZE		1
 #define CFB16_BLOCK_SIZE	2
 #define CFB32_BLOCK_SIZE	4
 #define CFB64_BLOCK_SIZE	8

+#define SIZE_IN_WORDS(x)	((x) >> 2)
+
 /* AES flags */
-#define AES_FLAGS_MODE_MASK	0x03ff
-#define AES_FLAGS_ENCRYPT	BIT(0)
-#define AES_FLAGS_CBC		BIT(1)
-#define AES_FLAGS_CFB		BIT(2)
-#define AES_FLAGS_CFB8		BIT(3)
-#define AES_FLAGS_CFB16		BIT(4)
-#define AES_FLAGS_CFB32		BIT(5)
-#define AES_FLAGS_CFB64		BIT(6)
-#define AES_FLAGS_CFB128	BIT(7)
-#define AES_FLAGS_OFB		BIT(8)
-#define AES_FLAGS_CTR		BIT(9)
-
-#define AES_FLAGS_INIT		BIT(16)
-#define AES_FLAGS_DMA		BIT(17)
-#define AES_FLAGS_BUSY		BIT(18)
-#define AES_FLAGS_FAST		BIT(19)
+/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
+#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
+#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
+#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
+#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
+#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
+#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
+#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
+#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
+#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
+#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
+#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
+#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
+#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
+
+#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
+				 AES_FLAGS_ENCRYPT |		\
+				 AES_FLAGS_GTAGEN)
+
+#define AES_FLAGS_INIT		BIT(2)
+#define AES_FLAGS_BUSY		BIT(3)
+#define AES_FLAGS_DUMP_REG	BIT(4)
+
+#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)

 #define ATMEL_AES_QUEUE_LENGTH	50

-#define ATMEL_AES_DMA_THRESHOLD		16
+#define ATMEL_AES_DMA_THRESHOLD		256


 struct atmel_aes_caps {
-	bool	has_dualbuff;
-	bool	has_cfb64;
-	u32		max_burst_size;
+	bool			has_dualbuff;
+	bool			has_cfb64;
+	bool			has_ctr32;
+	bool			has_gcm;
+	u32			max_burst_size;
 };
 };
 
 
 struct atmel_aes_dev;
 struct atmel_aes_dev;
 
 
+
+typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
+
+
+struct atmel_aes_base_ctx {
+	struct atmel_aes_dev	*dd;
+	atmel_aes_fn_t		start;
+	int			keylen;
+	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
+	u16			block_size;
+};
+
 struct atmel_aes_ctx {
 struct atmel_aes_ctx {
-	struct atmel_aes_dev *dd;
+	struct atmel_aes_base_ctx	base;
+};
+
+struct atmel_aes_ctr_ctx {
+	struct atmel_aes_base_ctx	base;
+
+	u32			iv[AES_BLOCK_SIZE / sizeof(u32)];
+	size_t			offset;
+	struct scatterlist	src[2];
+	struct scatterlist	dst[2];
+};
 
 
-	int		keylen;
-	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
+struct atmel_aes_gcm_ctx {
+	struct atmel_aes_base_ctx	base;
 
 
-	u16		block_size;
+	struct scatterlist	src[2];
+	struct scatterlist	dst[2];
+
+	u32			j0[AES_BLOCK_SIZE / sizeof(u32)];
+	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
+	u32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
+	size_t			textlen;
+
+	const u32		*ghash_in;
+	u32			*ghash_out;
+	atmel_aes_fn_t		ghash_resume;
 };
 };
 
 
 struct atmel_aes_reqctx {
 struct atmel_aes_reqctx {
-	unsigned long mode;
+	unsigned long		mode;
 };
 };
 
 
 struct atmel_aes_dma {
 struct atmel_aes_dma {
-	struct dma_chan			*chan;
-	struct dma_slave_config dma_conf;
+	struct dma_chan		*chan;
+	struct scatterlist	*sg;
+	int			nents;
+	unsigned int		remainder;
+	unsigned int		sg_len;
 };
 };
 
 
 struct atmel_aes_dev {
 struct atmel_aes_dev {
@@ -102,13 +152,18 @@ struct atmel_aes_dev {
 	unsigned long		phys_base;
 	unsigned long		phys_base;
 	void __iomem		*io_base;
 	void __iomem		*io_base;
 
 
-	struct atmel_aes_ctx	*ctx;
+	struct crypto_async_request	*areq;
+	struct atmel_aes_base_ctx	*ctx;
+
+	bool			is_async;
+	atmel_aes_fn_t		resume;
+	atmel_aes_fn_t		cpu_transfer_complete;
+
 	struct device		*dev;
 	struct device		*dev;
 	struct clk		*iclk;
 	struct clk		*iclk;
-	int	irq;
+	int			irq;
 
 
 	unsigned long		flags;
 	unsigned long		flags;
-	int	err;
 
 
 	spinlock_t		lock;
 	spinlock_t		lock;
 	struct crypto_queue	queue;
 	struct crypto_queue	queue;
@@ -116,33 +171,21 @@ struct atmel_aes_dev {
 	struct tasklet_struct	done_task;
 	struct tasklet_struct	done_task;
 	struct tasklet_struct	queue_task;
 	struct tasklet_struct	queue_task;
 
 
-	struct ablkcipher_request	*req;
-	size_t	total;
+	size_t			total;
+	size_t			datalen;
+	u32			*data;
 
 
-	struct scatterlist	*in_sg;
-	unsigned int		nb_in_sg;
-	size_t				in_offset;
-	struct scatterlist	*out_sg;
-	unsigned int		nb_out_sg;
-	size_t				out_offset;
+	struct atmel_aes_dma	src;
+	struct atmel_aes_dma	dst;
 
 
-	size_t	bufcnt;
-	size_t	buflen;
-	size_t	dma_size;
-
-	void	*buf_in;
-	int		dma_in;
-	dma_addr_t	dma_addr_in;
-	struct atmel_aes_dma	dma_lch_in;
-
-	void	*buf_out;
-	int		dma_out;
-	dma_addr_t	dma_addr_out;
-	struct atmel_aes_dma	dma_lch_out;
+	size_t			buflen;
+	void			*buf;
+	struct scatterlist	aligned_sg;
+	struct scatterlist	*real_dst;
 
 
 	struct atmel_aes_caps	caps;
 	struct atmel_aes_caps	caps;
 
 
-	u32	hw_version;
+	u32			hw_version;
 };
 };
 
 
 struct atmel_aes_drv {
 struct atmel_aes_drv {
@@ -155,71 +198,128 @@ static struct atmel_aes_drv atmel_aes = {
 	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
 	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
 };
 };
 
 
-static int atmel_aes_sg_length(struct ablkcipher_request *req,
-			struct scatterlist *sg)
+#ifdef VERBOSE_DEBUG
+static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
 {
 {
-	unsigned int total = req->nbytes;
-	int sg_nb;
-	unsigned int len;
-	struct scatterlist *sg_list;
-
-	sg_nb = 0;
-	sg_list = sg;
-	total = req->nbytes;
+	switch (offset) {
+	case AES_CR:
+		return "CR";
+
+	case AES_MR:
+		return "MR";
+
+	case AES_ISR:
+		return "ISR";
+
+	case AES_IMR:
+		return "IMR";
+
+	case AES_IER:
+		return "IER";
+
+	case AES_IDR:
+		return "IDR";
+
+	case AES_KEYWR(0):
+	case AES_KEYWR(1):
+	case AES_KEYWR(2):
+	case AES_KEYWR(3):
+	case AES_KEYWR(4):
+	case AES_KEYWR(5):
+	case AES_KEYWR(6):
+	case AES_KEYWR(7):
+		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
+		break;
 
 
-	while (total) {
-		len = min(sg_list->length, total);
+	case AES_IDATAR(0):
+	case AES_IDATAR(1):
+	case AES_IDATAR(2):
+	case AES_IDATAR(3):
+		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
+		break;
 
 
-		sg_nb++;
-		total -= len;
+	case AES_ODATAR(0):
+	case AES_ODATAR(1):
+	case AES_ODATAR(2):
+	case AES_ODATAR(3):
+		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
+		break;
 
 
-		sg_list = sg_next(sg_list);
-		if (!sg_list)
-			total = 0;
-	}
+	case AES_IVR(0):
+	case AES_IVR(1):
+	case AES_IVR(2):
+	case AES_IVR(3):
+		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
+		break;
 
 
-	return sg_nb;
-}
+	case AES_AADLENR:
+		return "AADLENR";
 
 
-static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
-			void *buf, size_t buflen, size_t total, int out)
-{
-	unsigned int count, off = 0;
+	case AES_CLENR:
+		return "CLENR";
 
 
-	while (buflen && total) {
-		count = min((*sg)->length - *offset, total);
-		count = min(count, buflen);
+	case AES_GHASHR(0):
+	case AES_GHASHR(1):
+	case AES_GHASHR(2):
+	case AES_GHASHR(3):
+		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
+		break;
 
 
-		if (!count)
-			return off;
+	case AES_TAGR(0):
+	case AES_TAGR(1):
+	case AES_TAGR(2):
+	case AES_TAGR(3):
+		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
+		break;
 
 
-		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+	case AES_CTRR:
+		return "CTRR";
 
 
-		off += count;
-		buflen -= count;
-		*offset += count;
-		total -= count;
+	case AES_GCMHR(0):
+	case AES_GCMHR(1):
+	case AES_GCMHR(2):
+	case AES_GCMHR(3):
+		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
+		break;
 
 
-		if (*offset == (*sg)->length) {
-			*sg = sg_next(*sg);
-			if (*sg)
-				*offset = 0;
-			else
-				total = 0;
-		}
+	default:
+		snprintf(tmp, sz, "0x%02x", offset);
+		break;
 	}
 	}
 
 
-	return off;
+	return tmp;
 }
 }
+#endif /* VERBOSE_DEBUG */
+
+/* Shared functions */
 
 
 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
 {
 {
-	return readl_relaxed(dd->io_base + offset);
+	u32 value = readl_relaxed(dd->io_base + offset);
+
+#ifdef VERBOSE_DEBUG
+	if (dd->flags & AES_FLAGS_DUMP_REG) {
+		char tmp[16];
+
+		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
+			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
+	}
+#endif /* VERBOSE_DEBUG */
+
+	return value;
 }
 }
 
 
 static inline void atmel_aes_write(struct atmel_aes_dev *dd,
 static inline void atmel_aes_write(struct atmel_aes_dev *dd,
 					u32 offset, u32 value)
 					u32 offset, u32 value)
 {
 {
+#ifdef VERBOSE_DEBUG
+	if (dd->flags & AES_FLAGS_DUMP_REG) {
+		char tmp[16];
+
+		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
+			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
+	}
+#endif /* VERBOSE_DEBUG */
+
 	writel_relaxed(value, dd->io_base + offset);
 	writel_relaxed(value, dd->io_base + offset);
 }
 }
 
 
@@ -231,13 +331,50 @@ static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
 }
 }
 
 
 static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
 static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
-					u32 *value, int count)
+			      const u32 *value, int count)
 {
 {
 	for (; count--; value++, offset += 4)
 	for (; count--; value++, offset += 4)
 		atmel_aes_write(dd, offset, *value);
 		atmel_aes_write(dd, offset, *value);
 }
 }
 
 
-static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
+static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
+					u32 *value)
+{
+	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
+}
+
+static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
+					 const u32 *value)
+{
+	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
+}
+
+static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
+						atmel_aes_fn_t resume)
+{
+	u32 isr = atmel_aes_read(dd, AES_ISR);
+
+	if (unlikely(isr & AES_INT_DATARDY))
+		return resume(dd);
+
+	dd->resume = resume;
+	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+	return -EINPROGRESS;
+}
+
+static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
+{
+	len &= block_size - 1;
+	return len ? block_size - len : 0;
+}
+
+static inline struct aead_request *
+aead_request_cast(struct crypto_async_request *req)
+{
+	return container_of(req, struct aead_request, base);
+}
+
+static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
 {
 {
 	struct atmel_aes_dev *aes_dd = NULL;
 	struct atmel_aes_dev *aes_dd = NULL;
 	struct atmel_aes_dev *tmp;
 	struct atmel_aes_dev *tmp;
@@ -270,7 +407,6 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
 		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
 		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
 		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
 		dd->flags |= AES_FLAGS_INIT;
 		dd->flags |= AES_FLAGS_INIT;
-		dd->err = 0;
 	}
 	}
 
 
 	return 0;
 	return 0;
@@ -281,552 +417,643 @@ static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
 	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
 	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
 }
 }
 
 
-static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
+static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 {
 {
-	atmel_aes_hw_init(dd);
+	int err;
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return err;
 
 
 	dd->hw_version = atmel_aes_get_version(dd);
 	dd->hw_version = atmel_aes_get_version(dd);
 
 
-	dev_info(dd->dev,
-			"version: 0x%x\n", dd->hw_version);
+	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
 
 
 	clk_disable_unprepare(dd->iclk);
 	clk_disable_unprepare(dd->iclk);
+	return 0;
+}
+
+static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
+				      const struct atmel_aes_reqctx *rctx)
+{
+	/* Clear all but persistent flags and set request flags. */
+	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
 }
 }
 
 
-static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
+static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
 {
 {
-	struct ablkcipher_request *req = dd->req;
+	return (dd->flags & AES_FLAGS_ENCRYPT);
+}
 
 
+static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+{
 	clk_disable_unprepare(dd->iclk);
 	clk_disable_unprepare(dd->iclk);
 	dd->flags &= ~AES_FLAGS_BUSY;
 	dd->flags &= ~AES_FLAGS_BUSY;
 
 
-	req->base.complete(&req->base, err);
-}
+	if (dd->is_async)
+		dd->areq->complete(dd->areq, err);
 
 
-static void atmel_aes_dma_callback(void *data)
-{
-	struct atmel_aes_dev *dd = data;
+	tasklet_schedule(&dd->queue_task);
 
 
-	/* dma_lch_out - completed */
-	tasklet_schedule(&dd->done_task);
+	return err;
 }
 }
 
 
-static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
-		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
+static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+				 const u32 *iv)
 {
 {
-	struct scatterlist sg[2];
-	struct dma_async_tx_descriptor	*in_desc, *out_desc;
+	u32 valmr = 0;
 
 
-	dd->dma_size = length;
+	/* MR register must be set before IV registers */
+	if (dd->ctx->keylen == AES_KEYSIZE_128)
+		valmr |= AES_MR_KEYSIZE_128;
+	else if (dd->ctx->keylen == AES_KEYSIZE_192)
+		valmr |= AES_MR_KEYSIZE_192;
+	else
+		valmr |= AES_MR_KEYSIZE_256;
 
 
-	dma_sync_single_for_device(dd->dev, dma_addr_in, length,
-				   DMA_TO_DEVICE);
-	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
-				   DMA_FROM_DEVICE);
+	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
 
 
-	if (dd->flags & AES_FLAGS_CFB8) {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_1_BYTE;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_1_BYTE;
-	} else if (dd->flags & AES_FLAGS_CFB16) {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_2_BYTES;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_2_BYTES;
+	if (use_dma) {
+		valmr |= AES_MR_SMOD_IDATAR0;
+		if (dd->caps.has_dualbuff)
+			valmr |= AES_MR_DUALBUFF;
 	} else {
 	} else {
-		dd->dma_lch_in.dma_conf.dst_addr_width =
-			DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dd->dma_lch_out.dma_conf.src_addr_width =
-			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		valmr |= AES_MR_SMOD_AUTO;
 	}
 	}
 
 
-	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
-			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
-		dd->dma_lch_in.dma_conf.src_maxburst = 1;
-		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
-		dd->dma_lch_out.dma_conf.src_maxburst = 1;
-		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
-	} else {
-		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
-		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
-		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
-		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
-	}
+	atmel_aes_write(dd, AES_MR, valmr);
 
 
-	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
-	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
+			  SIZE_IN_WORDS(dd->ctx->keylen));
 
 
-	dd->flags |= AES_FLAGS_DMA;
+	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
+		atmel_aes_write_block(dd, AES_IVR(0), iv);
+}
 
 
-	sg_init_table(&sg[0], 1);
-	sg_dma_address(&sg[0]) = dma_addr_in;
-	sg_dma_len(&sg[0]) = length;
 
 
-	sg_init_table(&sg[1], 1);
-	sg_dma_address(&sg[1]) = dma_addr_out;
-	sg_dma_len(&sg[1]) = length;
+/* CPU transfer */
 
 
-	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
-				1, DMA_MEM_TO_DEV,
-				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
-	if (!in_desc)
-		return -EINVAL;
+static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
+{
+	int err = 0;
+	u32 isr;
 
 
-	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
-				1, DMA_DEV_TO_MEM,
-				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!out_desc)
-		return -EINVAL;
+	for (;;) {
+		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
+		dd->data += 4;
+		dd->datalen -= AES_BLOCK_SIZE;
 
 
-	out_desc->callback = atmel_aes_dma_callback;
-	out_desc->callback_param = dd;
+		if (dd->datalen < AES_BLOCK_SIZE)
+			break;
 
 
-	dmaengine_submit(out_desc);
-	dma_async_issue_pending(dd->dma_lch_out.chan);
+		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
 
 
-	dmaengine_submit(in_desc);
-	dma_async_issue_pending(dd->dma_lch_in.chan);
+		isr = atmel_aes_read(dd, AES_ISR);
+		if (!(isr & AES_INT_DATARDY)) {
+			dd->resume = atmel_aes_cpu_transfer;
+			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+			return -EINPROGRESS;
+		}
+	}
 
 
-	return 0;
+	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
+				 dd->buf, dd->total))
+		err = -EINVAL;
+
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	return dd->cpu_transfer_complete(dd);
 }
 }
 
 
-static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
+static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
+			       struct scatterlist *src,
+			       struct scatterlist *dst,
+			       size_t len,
+			       atmel_aes_fn_t resume)
 {
 {
-	dd->flags &= ~AES_FLAGS_DMA;
+	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
 
 
-	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
-				dd->dma_size, DMA_TO_DEVICE);
-	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
-				dd->dma_size, DMA_FROM_DEVICE);
-
-	/* use cache buffers */
-	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
-	if (!dd->nb_in_sg)
+	if (unlikely(len == 0))
 		return -EINVAL;
 		return -EINVAL;
 
 
-	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
-	if (!dd->nb_out_sg)
-		return -EINVAL;
+	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
 
 
-	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
-					dd->buf_in, dd->total);
+	dd->total = len;
+	dd->real_dst = dst;
+	dd->cpu_transfer_complete = resume;
+	dd->datalen = len + padlen;
+	dd->data = (u32 *)dd->buf;
+	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
+}
 
 
-	if (!dd->bufcnt)
-		return -EINVAL;
 
 
-	dd->total -= dd->bufcnt;
+/* DMA transfer */
 
 
-	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
-	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
-				dd->bufcnt >> 2);
+static void atmel_aes_dma_callback(void *data);
 
 
-	return 0;
+static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
+				    struct scatterlist *sg,
+				    size_t len,
+				    struct atmel_aes_dma *dma)
+{
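+	/*
+	 * Walk the scatterlist and check that every segment is 32-bit
+	 * aligned and a multiple of the block size; on success the last
+	 * segment is trimmed to 'len' and the cut-off is recorded in
+	 * dma->remainder so atmel_aes_restore_sg() can undo it later.
+	 */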
+	int nents;
+
+	if (!IS_ALIGNED(len, dd->ctx->block_size))
+		return false;
+
+	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
+		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+			return false;
+
+		if (len <= sg->length) {
+			if (!IS_ALIGNED(len, dd->ctx->block_size))
+				return false;
+
+			dma->nents = nents+1;
+			dma->remainder = sg->length - len;
+			sg->length = len;
+			return true;
+		}
+
+		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
+			return false;
+
+		len -= sg->length;
+	}
+
+	return false;
 }
 }
 
 
-static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
+static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
 {
 {
-	int err, fast = 0, in, out;
-	size_t count;
-	dma_addr_t addr_in, addr_out;
+	struct scatterlist *sg = dma->sg;
+	int nents = dma->nents;
 
 
-	if ((!dd->in_offset) && (!dd->out_offset)) {
-		/* check for alignment */
-		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
-			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
-		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
-			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
-		fast = in && out;
+	if (!dma->remainder)
+		return;
 
 
-		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
-			fast = 0;
-	}
+	while (--nents > 0 && sg)
+		sg = sg_next(sg);
 
 
+	if (!sg)
+		return;
 
 
-	if (fast)  {
-		count = min(dd->total, sg_dma_len(dd->in_sg));
-		count = min(count, sg_dma_len(dd->out_sg));
+	sg->length += dma->remainder;
+}
 
 
-		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-		if (!err) {
-			dev_err(dd->dev, "dma_map_sg() error\n");
-			return -EINVAL;
-		}
+static int atmel_aes_map(struct atmel_aes_dev *dd,
+			 struct scatterlist *src,
+			 struct scatterlist *dst,
+			 size_t len)
+{
+	bool src_aligned, dst_aligned;
+	size_t padlen;
+
+	dd->total = len;
+	dd->src.sg = src;
+	dd->dst.sg = dst;
+	dd->real_dst = dst;
 
 
-		err = dma_map_sg(dd->dev, dd->out_sg, 1,
-				DMA_FROM_DEVICE);
-		if (!err) {
-			dev_err(dd->dev, "dma_map_sg() error\n");
-			dma_unmap_sg(dd->dev, dd->in_sg, 1,
-				DMA_TO_DEVICE);
-			return -EINVAL;
+	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
+	if (src == dst)
+		dst_aligned = src_aligned;
+	else
+		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
+	if (!src_aligned || !dst_aligned) {
+		padlen = atmel_aes_padlen(len, dd->ctx->block_size);
+
+		if (dd->buflen < len + padlen)
+			return -ENOMEM;
+
+		if (!src_aligned) {
+			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
+			dd->src.sg = &dd->aligned_sg;
+			dd->src.nents = 1;
+			dd->src.remainder = 0;
 		}
 		}
 
 
-		addr_in = sg_dma_address(dd->in_sg);
-		addr_out = sg_dma_address(dd->out_sg);
+		if (!dst_aligned) {
+			dd->dst.sg = &dd->aligned_sg;
+			dd->dst.nents = 1;
+			dd->dst.remainder = 0;
+		}
 
 
-		dd->flags |= AES_FLAGS_FAST;
+		sg_init_table(&dd->aligned_sg, 1);
+		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
+	}
 
 
+	if (dd->src.sg == dd->dst.sg) {
+		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
+					    DMA_BIDIRECTIONAL);
+		dd->dst.sg_len = dd->src.sg_len;
+		if (!dd->src.sg_len)
+			return -EFAULT;
 	} else {
 	} else {
-		dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
-					dd->dma_size, DMA_TO_DEVICE);
+		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
+					    DMA_TO_DEVICE);
+		if (!dd->src.sg_len)
+			return -EFAULT;
+
+		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
+					    DMA_FROM_DEVICE);
+		if (!dd->dst.sg_len) {
+			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
+				     DMA_TO_DEVICE);
+			return -EFAULT;
+		}
+	}
 
 
-		/* use cache buffers */
-		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
-				dd->buf_in, dd->buflen, dd->total, 0);
+	return 0;
+}
 
 
-		addr_in = dd->dma_addr_in;
-		addr_out = dd->dma_addr_out;
+static void atmel_aes_unmap(struct atmel_aes_dev *dd)
+{
+	if (dd->src.sg == dd->dst.sg) {
+		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
+			     DMA_BIDIRECTIONAL);
 
 
-		dd->flags &= ~AES_FLAGS_FAST;
-	}
+		if (dd->src.sg != &dd->aligned_sg)
+			atmel_aes_restore_sg(&dd->src);
+	} else {
+		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
+			     DMA_FROM_DEVICE);
 
 
-	dd->total -= count;
+		if (dd->dst.sg != &dd->aligned_sg)
+			atmel_aes_restore_sg(&dd->dst);
 
 
-	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
+		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
+			     DMA_TO_DEVICE);
 
 
-	if (err && (dd->flags & AES_FLAGS_FAST)) {
-		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+		if (dd->src.sg != &dd->aligned_sg)
+			atmel_aes_restore_sg(&dd->src);
 	}
 	}
 
 
-	return err;
+	if (dd->dst.sg == &dd->aligned_sg)
+		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
+				    dd->buf, dd->total);
 }
 }
 
 
-static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
+static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
+					enum dma_slave_buswidth addr_width,
+					enum dma_transfer_direction dir,
+					u32 maxburst)
 {
 {
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config config;
+	dma_async_tx_callback callback;
+	struct atmel_aes_dma *dma;
 	int err;
 	int err;
-	u32 valcr = 0, valmr = 0;
 
 
-	err = atmel_aes_hw_init(dd);
+	memset(&config, 0, sizeof(config));
+	config.direction = dir;
+	config.src_addr_width = addr_width;
+	config.dst_addr_width = addr_width;
+	config.src_maxburst = maxburst;
+	config.dst_maxburst = maxburst;
+
+	switch (dir) {
+	case DMA_MEM_TO_DEV:
+		dma = &dd->src;
+		callback = NULL;
+		config.dst_addr = dd->phys_base + AES_IDATAR(0);
+		break;
 
 
+	case DMA_DEV_TO_MEM:
+		dma = &dd->dst;
+		callback = atmel_aes_dma_callback;
+		config.src_addr = dd->phys_base + AES_ODATAR(0);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	err = dmaengine_slave_config(dma->chan, &config);
 	if (err)
 	if (err)
 		return err;
 		return err;
 
 
-	/* MR register must be set before IV registers */
-	if (dd->ctx->keylen == AES_KEYSIZE_128)
-		valmr |= AES_MR_KEYSIZE_128;
-	else if (dd->ctx->keylen == AES_KEYSIZE_192)
-		valmr |= AES_MR_KEYSIZE_192;
-	else
-		valmr |= AES_MR_KEYSIZE_256;
+	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		return -ENOMEM;
 
 
-	if (dd->flags & AES_FLAGS_CBC) {
-		valmr |= AES_MR_OPMOD_CBC;
-	} else if (dd->flags & AES_FLAGS_CFB) {
-		valmr |= AES_MR_OPMOD_CFB;
-		if (dd->flags & AES_FLAGS_CFB8)
-			valmr |= AES_MR_CFBS_8b;
-		else if (dd->flags & AES_FLAGS_CFB16)
-			valmr |= AES_MR_CFBS_16b;
-		else if (dd->flags & AES_FLAGS_CFB32)
-			valmr |= AES_MR_CFBS_32b;
-		else if (dd->flags & AES_FLAGS_CFB64)
-			valmr |= AES_MR_CFBS_64b;
-		else if (dd->flags & AES_FLAGS_CFB128)
-			valmr |= AES_MR_CFBS_128b;
-	} else if (dd->flags & AES_FLAGS_OFB) {
-		valmr |= AES_MR_OPMOD_OFB;
-	} else if (dd->flags & AES_FLAGS_CTR) {
-		valmr |= AES_MR_OPMOD_CTR;
-	} else {
-		valmr |= AES_MR_OPMOD_ECB;
-	}
+	desc->callback = callback;
+	desc->callback_param = dd;
+	dmaengine_submit(desc);
+	dma_async_issue_pending(dma->chan);
 
 
-	if (dd->flags & AES_FLAGS_ENCRYPT)
-		valmr |= AES_MR_CYPHER_ENC;
+	return 0;
+}
 
 
-	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
-		valmr |= AES_MR_SMOD_IDATAR0;
-		if (dd->caps.has_dualbuff)
-			valmr |= AES_MR_DUALBUFF;
-	} else {
-		valmr |= AES_MR_SMOD_AUTO;
+static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
+					enum dma_transfer_direction dir)
+{
+	struct atmel_aes_dma *dma;
+
+	switch (dir) {
+	case DMA_MEM_TO_DEV:
+		dma = &dd->src;
+		break;
+
+	case DMA_DEV_TO_MEM:
+		dma = &dd->dst;
+		break;
+
+	default:
+		return;
 	}
 	}
 
 
-	atmel_aes_write(dd, AES_CR, valcr);
-	atmel_aes_write(dd, AES_MR, valmr);
+	dmaengine_terminate_all(dma->chan);
+}
 
 
-	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
-						dd->ctx->keylen >> 2);
+static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
+			       struct scatterlist *src,
+			       struct scatterlist *dst,
+			       size_t len,
+			       atmel_aes_fn_t resume)
+{
+	enum dma_slave_buswidth addr_width;
+	u32 maxburst;
+	int err;
+
+	switch (dd->ctx->block_size) {
+	case CFB8_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		maxburst = 1;
+		break;
 
 
-	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
-	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
-	   dd->req->info) {
-		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
+	case CFB16_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		maxburst = 1;
+		break;
+
+	case CFB32_BLOCK_SIZE:
+	case CFB64_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		maxburst = 1;
+		break;
+
+	case AES_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		maxburst = dd->caps.max_burst_size;
+		break;
+
+	default:
+		err = -EINVAL;
+		goto exit;
 	}
 	}
 
 
-	return 0;
+	err = atmel_aes_map(dd, src, dst, len);
+	if (err)
+		goto exit;
+
+	dd->resume = resume;
+
+	/* Set output DMA transfer first */
+	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
+					   maxburst);
+	if (err)
+		goto unmap;
+
+	/* Then set input DMA transfer */
+	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
+					   maxburst);
+	if (err)
+		goto output_transfer_stop;
+
+	return -EINPROGRESS;
+
+output_transfer_stop:
+	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+unmap:
+	atmel_aes_unmap(dd);
+exit:
+	return atmel_aes_complete(dd, err);
+}
+
+static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
+{
+	atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
+	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+	atmel_aes_unmap(dd);
+}
+
+static void atmel_aes_dma_callback(void *data)
+{
+	struct atmel_aes_dev *dd = data;
+
+	atmel_aes_dma_stop(dd);
+	dd->is_async = true;
+	(void)dd->resume(dd);
 }
 }
 
 
 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
-			       struct ablkcipher_request *req)
+				  struct crypto_async_request *new_areq)
 {
 {
-	struct crypto_async_request *async_req, *backlog;
-	struct atmel_aes_ctx *ctx;
-	struct atmel_aes_reqctx *rctx;
+	struct crypto_async_request *areq, *backlog;
+	struct atmel_aes_base_ctx *ctx;
 	unsigned long flags;
 	unsigned long flags;
 	int err, ret = 0;
 	int err, ret = 0;
 
 
 	spin_lock_irqsave(&dd->lock, flags);
 	spin_lock_irqsave(&dd->lock, flags);
-	if (req)
-		ret = ablkcipher_enqueue_request(&dd->queue, req);
+	if (new_areq)
+		ret = crypto_enqueue_request(&dd->queue, new_areq);
 	if (dd->flags & AES_FLAGS_BUSY) {
 	if (dd->flags & AES_FLAGS_BUSY) {
 		spin_unlock_irqrestore(&dd->lock, flags);
 		spin_unlock_irqrestore(&dd->lock, flags);
 		return ret;
 		return ret;
 	}
 	}
 	backlog = crypto_get_backlog(&dd->queue);
 	backlog = crypto_get_backlog(&dd->queue);
-	async_req = crypto_dequeue_request(&dd->queue);
-	if (async_req)
+	areq = crypto_dequeue_request(&dd->queue);
+	if (areq)
 		dd->flags |= AES_FLAGS_BUSY;
 		dd->flags |= AES_FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 
-	if (!async_req)
+	if (!areq)
 		return ret;
 		return ret;
 
 
 	if (backlog)
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
 		backlog->complete(backlog, -EINPROGRESS);
 
 
-	req = ablkcipher_request_cast(async_req);
-
-	/* assign new request to device */
-	dd->req = req;
-	dd->total = req->nbytes;
-	dd->in_offset = 0;
-	dd->in_sg = req->src;
-	dd->out_offset = 0;
-	dd->out_sg = req->dst;
+	ctx = crypto_tfm_ctx(areq->tfm);
 
 
-	rctx = ablkcipher_request_ctx(req);
-	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
-	rctx->mode &= AES_FLAGS_MODE_MASK;
-	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
+	dd->areq = areq;
 	dd->ctx = ctx;
 	dd->ctx = ctx;
-	ctx->dd = dd;
+	dd->is_async = (areq != new_areq);
 
 
-	err = atmel_aes_write_ctrl(dd);
-	if (!err) {
-		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
-			err = atmel_aes_crypt_dma_start(dd);
-		else
-			err = atmel_aes_crypt_cpu_start(dd);
-	}
-	if (err) {
-		/* aes_task will not finish it, so do it here */
-		atmel_aes_finish_req(dd, err);
-		tasklet_schedule(&dd->queue_task);
-	}
-
-	return ret;
+	err = ctx->start(dd);
+	return (dd->is_async) ? ret : err;
 }
 }
 
 
-static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
-{
-	int err = -EINVAL;
-	size_t count;
 
 
-	if (dd->flags & AES_FLAGS_DMA) {
-		err = 0;
-		if  (dd->flags & AES_FLAGS_FAST) {
-			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
-			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-		} else {
-			dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
-				dd->dma_size, DMA_FROM_DEVICE);
-
-			/* copy data */
-			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
-				dd->buf_out, dd->buflen, dd->dma_size, 1);
-			if (count != dd->dma_size) {
-				err = -EINVAL;
-				pr_err("not all data converted: %u\n", count);
-			}
-		}
-	}
+/* AES async block ciphers */
 
 
-	return err;
+static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
+{
+	return atmel_aes_complete(dd, 0);
 }
 }
 
 
-
-static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+static int atmel_aes_start(struct atmel_aes_dev *dd)
 {
 {
-	int err = -ENOMEM;
-
-	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
-	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
-	dd->buflen = PAGE_SIZE;
-	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
-
-	if (!dd->buf_in || !dd->buf_out) {
-		dev_err(dd->dev, "unable to alloc pages.\n");
-		goto err_alloc;
-	}
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
+			dd->ctx->block_size != AES_BLOCK_SIZE);
+	int err;
 
 
-	/* MAP here */
-	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
-					dd->buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-		err = -EINVAL;
-		goto err_map_in;
-	}
+	atmel_aes_set_mode(dd, rctx);
 
 
-	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
-					dd->buflen, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-		err = -EINVAL;
-		goto err_map_out;
-	}
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
 
 
-	return 0;
+	atmel_aes_write_ctrl(dd, use_dma, req->info);
+	if (use_dma)
+		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+					   atmel_aes_transfer_complete);
 
 
-err_map_out:
-	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
-		DMA_TO_DEVICE);
-err_map_in:
-err_alloc:
-	free_page((unsigned long)dd->buf_out);
-	free_page((unsigned long)dd->buf_in);
-	if (err)
-		pr_err("error: %d\n", err);
-	return err;
+	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+				   atmel_aes_transfer_complete);
 }
 }
 
 
-static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+static inline struct atmel_aes_ctr_ctx *
+atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
 {
 {
-	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-			 DMA_FROM_DEVICE);
-	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
-		DMA_TO_DEVICE);
-	free_page((unsigned long)dd->buf_out);
-	free_page((unsigned long)dd->buf_in);
+	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
 }
 }
 
 
-static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
 {
 {
-	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
-			crypto_ablkcipher_reqtfm(req));
-	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
-	struct atmel_aes_dev *dd;
-
-	if (mode & AES_FLAGS_CFB8) {
-		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of CFB8 blocks\n");
-			return -EINVAL;
-		}
-		ctx->block_size = CFB8_BLOCK_SIZE;
-	} else if (mode & AES_FLAGS_CFB16) {
-		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of CFB16 blocks\n");
-			return -EINVAL;
-		}
-		ctx->block_size = CFB16_BLOCK_SIZE;
-	} else if (mode & AES_FLAGS_CFB32) {
-		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of CFB32 blocks\n");
-			return -EINVAL;
-		}
-		ctx->block_size = CFB32_BLOCK_SIZE;
-	} else if (mode & AES_FLAGS_CFB64) {
-		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of CFB64 blocks\n");
-			return -EINVAL;
+	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct scatterlist *src, *dst;
+	u32 ctr, blocks;
+	size_t datalen;
+	bool use_dma, fragmented = false;
+
+	/* Check for transfer completion. */
+	ctx->offset += dd->total;
+	if (ctx->offset >= req->nbytes)
+		return atmel_aes_transfer_complete(dd);
+
+	/* Compute data length. */
+	datalen = req->nbytes - ctx->offset;
+	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+	ctr = be32_to_cpu(ctx->iv[3]);
+	if (dd->caps.has_ctr32) {
+		/* Check 32bit counter overflow. */
+		u32 start = ctr;
+		u32 end = start + blocks - 1;
+
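+		/*
+		 * If the counter would wrap, clamp the transfer: -start (as
+		 * a u32) is the number of blocks left before the 32-bit
+		 * counter overflows, and the remaining data is handled as a
+		 * new, fragmented chunk with a manually incremented IV.
+		 */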
+		if (end < start) {
+			ctr |= 0xffffffff;
+			datalen = AES_BLOCK_SIZE * -start;
+			fragmented = true;
 		}
 		}
-		ctx->block_size = CFB64_BLOCK_SIZE;
 	} else {
 	} else {
-		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-			pr_err("request size is not exact amount of AES blocks\n");
-			return -EINVAL;
+		/* Check 16bit counter overflow. */
+		u16 start = ctr & 0xffff;
+		u16 end = start + (u16)blocks - 1;
+
+		if (blocks >> 16 || end < start) {
+			ctr |= 0xffff;
+			datalen = AES_BLOCK_SIZE * (0x10000-start);
+			fragmented = true;
 		}
 		}
-		ctx->block_size = AES_BLOCK_SIZE;
+	}
+	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
+
+	/* Jump to offset. */
+	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
+	dst = ((req->src == req->dst) ? src :
+	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
+
+	/* Configure hardware. */
+	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
+	if (unlikely(fragmented)) {
+		/*
+		 * Increment the counter manually to cope with the hardware
+		 * counter overflow.
+		 */
+		ctx->iv[3] = cpu_to_be32(ctr);
+		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
 	}
 	}
 
 
-	dd = atmel_aes_find_dev(ctx);
-	if (!dd)
-		return -ENODEV;
-
-	rctx->mode = mode;
+	if (use_dma)
+		return atmel_aes_dma_start(dd, src, dst, datalen,
+					   atmel_aes_ctr_transfer);
 
 
-	return atmel_aes_handle_queue(dd, req);
+	return atmel_aes_cpu_start(dd, src, dst, datalen,
+				   atmel_aes_ctr_transfer);
 }
 }
 
 
-static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
 {
 {
-	struct at_dma_slave	*sl = slave;
+	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	int err;
 
 
-	if (sl && sl->dma_dev == chan->device->dev) {
-		chan->private = sl;
-		return true;
-	} else {
-		return false;
-	}
+	atmel_aes_set_mode(dd, rctx);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
+	ctx->offset = 0;
+	dd->total = 0;
+	return atmel_aes_ctr_transfer(dd);
 }
 }
 
 
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
-	struct crypto_platform_data *pdata)
+static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 {
-	int err = -ENOMEM;
-	dma_cap_mask_t mask;
+	struct atmel_aes_base_ctx *ctx;
+	struct atmel_aes_reqctx *rctx;
+	struct atmel_aes_dev *dd;
 
 
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	switch (mode & AES_FLAGS_OPMODE_MASK) {
+	case AES_FLAGS_CFB8:
+		ctx->block_size = CFB8_BLOCK_SIZE;
+		break;
 
 
-	/* Try to grab 2 DMA channels */
-	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
-			atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
-	if (!dd->dma_lch_in.chan)
-		goto err_dma_in;
+	case AES_FLAGS_CFB16:
+		ctx->block_size = CFB16_BLOCK_SIZE;
+		break;
 
 
-	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
-	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
-		AES_IDATAR(0);
-	dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
-	dd->dma_lch_in.dma_conf.src_addr_width =
-		DMA_SLAVE_BUSWIDTH_4_BYTES;
-	dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
-	dd->dma_lch_in.dma_conf.dst_addr_width =
-		DMA_SLAVE_BUSWIDTH_4_BYTES;
-	dd->dma_lch_in.dma_conf.device_fc = false;
-
-	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
-			atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
-	if (!dd->dma_lch_out.chan)
-		goto err_dma_out;
+	case AES_FLAGS_CFB32:
+		ctx->block_size = CFB32_BLOCK_SIZE;
+		break;
 
 
-	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
-	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
-		AES_ODATAR(0);
-	dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
-	dd->dma_lch_out.dma_conf.src_addr_width =
-		DMA_SLAVE_BUSWIDTH_4_BYTES;
-	dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
-	dd->dma_lch_out.dma_conf.dst_addr_width =
-		DMA_SLAVE_BUSWIDTH_4_BYTES;
-	dd->dma_lch_out.dma_conf.device_fc = false;
+	case AES_FLAGS_CFB64:
+		ctx->block_size = CFB64_BLOCK_SIZE;
+		break;
 
 
-	return 0;
+	default:
+		ctx->block_size = AES_BLOCK_SIZE;
+		break;
+	}
 
 
-err_dma_out:
-	dma_release_channel(dd->dma_lch_in.chan);
-err_dma_in:
-	dev_warn(dd->dev, "no DMA channel available\n");
-	return err;
-}
+	dd = atmel_aes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
 
 
-static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
-{
-	dma_release_channel(dd->dma_lch_in.chan);
-	dma_release_channel(dd->dma_lch_out.chan);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->mode = mode;
+
+	return atmel_aes_handle_queue(dd, &req->base);
 }
 }
 
 
 static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 			   unsigned int keylen)
 			   unsigned int keylen)
 {
 {
-	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
 
-	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
-		   keylen != AES_KEYSIZE_256) {
+	if (keylen != AES_KEYSIZE_128 &&
+	    keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256) {
 		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
@@ -839,115 +1066,110 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 
 
 static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT);
+	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		0);
+	return atmel_aes_crypt(req, AES_FLAGS_ECB);
 }
 }
 
 
 static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
+	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CBC);
+	return atmel_aes_crypt(req, AES_FLAGS_CBC);
 }
 }
 
 
 static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
+	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_OFB);
+	return atmel_aes_crypt(req, AES_FLAGS_OFB);
 }
 }
 
 
 static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB128);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
 }
 }
 
 
 static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB64);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
 }
 }
 
 
 static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB32);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
 }
 }
 
 
 static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB16);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
 }
 }
 
 
 static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT |	AES_FLAGS_CFB | AES_FLAGS_CFB8);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CFB | AES_FLAGS_CFB8);
+	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
 }
 }
 
 
 static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
 static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
+	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
 }
 }
 
 
 static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
 static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
 {
 {
-	return atmel_aes_crypt(req,
-		AES_FLAGS_CTR);
+	return atmel_aes_crypt(req, AES_FLAGS_CTR);
 }
 }
 
 
 static int atmel_aes_cra_init(struct crypto_tfm *tfm)
 static int atmel_aes_cra_init(struct crypto_tfm *tfm)
 {
 {
+	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_start;
+
+	return 0;
+}
+
+static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
 	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
 	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_ctr_start;
 
 
 	return 0;
 	return 0;
 }
 }
@@ -960,7 +1182,7 @@ static struct crypto_alg aes_algs[] = {
 {
 {
 	.cra_name		= "ecb(aes)",
 	.cra_name		= "ecb(aes)",
 	.cra_driver_name	= "atmel-ecb-aes",
 	.cra_driver_name	= "atmel-ecb-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
@@ -980,7 +1202,7 @@ static struct crypto_alg aes_algs[] = {
 {
 {
 	.cra_name		= "cbc(aes)",
 	.cra_name		= "cbc(aes)",
 	.cra_driver_name	= "atmel-cbc-aes",
 	.cra_driver_name	= "atmel-cbc-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
@@ -1001,7 +1223,7 @@ static struct crypto_alg aes_algs[] = {
 {
 {
 	.cra_name		= "ofb(aes)",
 	.cra_name		= "ofb(aes)",
 	.cra_driver_name	= "atmel-ofb-aes",
 	.cra_driver_name	= "atmel-ofb-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
@@ -1022,7 +1244,7 @@ static struct crypto_alg aes_algs[] = {
 {
 {
 	.cra_name		= "cfb(aes)",
 	.cra_name		= "cfb(aes)",
 	.cra_driver_name	= "atmel-cfb-aes",
 	.cra_driver_name	= "atmel-cfb-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
@@ -1043,7 +1265,7 @@ static struct crypto_alg aes_algs[] = {
 {
 {
 	.cra_name		= "cfb32(aes)",
 	.cra_name		= "cfb32(aes)",
 	.cra_driver_name	= "atmel-cfb32-aes",
 	.cra_driver_name	= "atmel-cfb32-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= CFB32_BLOCK_SIZE,
 	.cra_blocksize		= CFB32_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
@@ -1064,7 +1286,7 @@ static struct crypto_alg aes_algs[] = {
 {
 {
 	.cra_name		= "cfb16(aes)",
 	.cra_name		= "cfb16(aes)",
 	.cra_driver_name	= "atmel-cfb16-aes",
 	.cra_driver_name	= "atmel-cfb16-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= CFB16_BLOCK_SIZE,
 	.cra_blocksize		= CFB16_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
@@ -1085,7 +1307,7 @@ static struct crypto_alg aes_algs[] = {
 {
 {
 	.cra_name		= "cfb8(aes)",
 	.cra_name		= "cfb8(aes)",
 	.cra_driver_name	= "atmel-cfb8-aes",
 	.cra_driver_name	= "atmel-cfb8-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= CFB8_BLOCK_SIZE,
 	.cra_blocksize		= CFB8_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
@@ -1106,14 +1328,14 @@ static struct crypto_alg aes_algs[] = {
 {
 {
 	.cra_name		= "ctr(aes)",
 	.cra_name		= "ctr(aes)",
 	.cra_driver_name	= "atmel-ctr-aes",
 	.cra_driver_name	= "atmel-ctr-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctr_ctx),
 	.cra_alignmask		= 0xf,
 	.cra_alignmask		= 0xf,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_module		= THIS_MODULE,
-	.cra_init		= atmel_aes_cra_init,
+	.cra_init		= atmel_aes_ctr_cra_init,
 	.cra_exit		= atmel_aes_cra_exit,
 	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.min_keysize	= AES_MIN_KEY_SIZE,
@@ -1129,7 +1351,7 @@ static struct crypto_alg aes_algs[] = {
 static struct crypto_alg aes_cfb64_alg = {
 static struct crypto_alg aes_cfb64_alg = {
 	.cra_name		= "cfb64(aes)",
 	.cra_name		= "cfb64(aes)",
 	.cra_driver_name	= "atmel-cfb64-aes",
 	.cra_driver_name	= "atmel-cfb64-aes",
-	.cra_priority		= 100,
+	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= CFB64_BLOCK_SIZE,
 	.cra_blocksize		= CFB64_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
@@ -1148,53 +1370,496 @@ static struct crypto_alg aes_cfb64_alg = {
 	}
 	}
 };
 };
 
 
-static void atmel_aes_queue_task(unsigned long data)
+
+/* gcm aead functions */
+
+static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
+			       const u32 *data, size_t datalen,
+			       const u32 *ghash_in, u32 *ghash_out,
+			       atmel_aes_fn_t resume);
+static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
+
+static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
+
+static inline struct atmel_aes_gcm_ctx *
+atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
 {
 {
-	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
+	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
+}
 
 
-	atmel_aes_handle_queue(dd, NULL);
+static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
+			       const u32 *data, size_t datalen,
+			       const u32 *ghash_in, u32 *ghash_out,
+			       atmel_aes_fn_t resume)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+
+	dd->data = (u32 *)data;
+	dd->datalen = datalen;
+	ctx->ghash_in = ghash_in;
+	ctx->ghash_out = ghash_out;
+	ctx->ghash_resume = resume;
+
+	atmel_aes_write_ctrl(dd, false, NULL);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
 }
 }
 
 
-static void atmel_aes_done_task(unsigned long data)
+static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
 {
 {
-	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+
+	/* Set the data length. */
+	atmel_aes_write(dd, AES_AADLENR, dd->total);
+	atmel_aes_write(dd, AES_CLENR, 0);
+
+	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
+	if (ctx->ghash_in)
+		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
+
+	return atmel_aes_gcm_ghash_finalize(dd);
+}
+
+static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	u32 isr;
+
+	/* Write data into the Input Data Registers. */
+	while (dd->datalen > 0) {
+		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
+		dd->data += 4;
+		dd->datalen -= AES_BLOCK_SIZE;
+
+		isr = atmel_aes_read(dd, AES_ISR);
+		if (!(isr & AES_INT_DATARDY)) {
+			dd->resume = atmel_aes_gcm_ghash_finalize;
+			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+			return -EINPROGRESS;
+		}
+	}
+
+	/* Read the computed hash from GHASHRx. */
+	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
+
+	return ctx->ghash_resume(dd);
+}
+
+
+static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
+	size_t ivsize = crypto_aead_ivsize(tfm);
+	size_t datalen, padlen;
+	const void *iv = req->iv;
+	u8 *data = dd->buf;
 	int err;
 	int err;
 
 
-	if (!(dd->flags & AES_FLAGS_DMA)) {
-		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
-				dd->bufcnt >> 2);
+	atmel_aes_set_mode(dd, rctx);
 
 
-		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
-			dd->buf_out, dd->bufcnt))
-			err = 0;
-		else
-			err = -EINVAL;
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	if (likely(ivsize == 12)) {
+		memcpy(ctx->j0, iv, ivsize);
+		ctx->j0[3] = cpu_to_be32(1);
+		return atmel_aes_gcm_process(dd);
+	}
+
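+	/*
+	 * For IVs other than 96 bits, the GCM specification defines
+	 * J0 = GHASH(IV || 0^s || [len(IV)]64): build that padded block in
+	 * dd->buf and hash it before processing the request.
+	 */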
+	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
+	datalen = ivsize + padlen + AES_BLOCK_SIZE;
+	if (datalen > dd->buflen)
+		return atmel_aes_complete(dd, -EINVAL);
+
+	memcpy(data, iv, ivsize);
+	memset(data + ivsize, 0, padlen + sizeof(u64));
+	((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
+
+	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
+				   NULL, ctx->j0, atmel_aes_gcm_process);
+}
+
+static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	bool enc = atmel_aes_is_encrypt(dd);
+	u32 authsize;
+
+	/* Compute text length. */
+	authsize = crypto_aead_authsize(tfm);
+	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
+
+	/*
+	 * According to tcrypt test suite, the GCM Automatic Tag Generation
+	 * fails when both the message and its associated data are empty.
+	 */
+	if (likely(req->assoclen != 0 || ctx->textlen != 0))
+		dd->flags |= AES_FLAGS_GTAGEN;
+
+	atmel_aes_write_ctrl(dd, false, NULL);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
+}
+
+static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	u32 j0_lsw, *j0 = ctx->j0;
+	size_t padlen;
+
+	/* Write incr32(J0) into IV. */
+	j0_lsw = j0[3];
+	j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
+	atmel_aes_write_block(dd, AES_IVR(0), j0);
+	j0[3] = j0_lsw;
+
+	/* Set aad and text lengths. */
+	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
+	atmel_aes_write(dd, AES_CLENR, ctx->textlen);
+
+	/* Check whether AAD are present. */
+	if (unlikely(req->assoclen == 0)) {
+		dd->datalen = 0;
+		return atmel_aes_gcm_data(dd);
+	}
+
+	/* Copy assoc data and add padding. */
+	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
+	if (unlikely(req->assoclen + padlen > dd->buflen))
+		return atmel_aes_complete(dd, -EINVAL);
+	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
+
+	/* Write assoc data into the Input Data register. */
+	dd->data = (u32 *)dd->buf;
+	dd->datalen = req->assoclen + padlen;
+	return atmel_aes_gcm_data(dd);
+}
 
 
-		goto cpu_end;
+static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
+	struct scatterlist *src, *dst;
+	u32 isr, mr;
+
+	/* Write AAD first. */
+	while (dd->datalen > 0) {
+		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
+		dd->data += 4;
+		dd->datalen -= AES_BLOCK_SIZE;
+
+		isr = atmel_aes_read(dd, AES_ISR);
+		if (!(isr & AES_INT_DATARDY)) {
+			dd->resume = atmel_aes_gcm_data;
+			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+			return -EINPROGRESS;
+		}
 	}
 
-	err = atmel_aes_crypt_dma_stop(dd);
+	/* GMAC only. */
+	if (unlikely(ctx->textlen == 0))
+		return atmel_aes_gcm_tag_init(dd);
+
+	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
+	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
+	dst = ((req->src == req->dst) ? src :
+	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
+
+	if (use_dma) {
+		/* Update the Mode Register for DMA transfers. */
+		mr = atmel_aes_read(dd, AES_MR);
+		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
+		mr |= AES_MR_SMOD_IDATAR0;
+		if (dd->caps.has_dualbuff)
+			mr |= AES_MR_DUALBUFF;
+		atmel_aes_write(dd, AES_MR, mr);
+
+		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
+					   atmel_aes_gcm_tag_init);
+	}
 
-	err = dd->err ? : err;
+	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
+				   atmel_aes_gcm_tag_init);
+}
 
-	if (dd->total && !err) {
-		if (dd->flags & AES_FLAGS_FAST) {
-			dd->in_sg = sg_next(dd->in_sg);
-			dd->out_sg = sg_next(dd->out_sg);
-			if (!dd->in_sg || !dd->out_sg)
-				err = -EINVAL;
+static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	u64 *data = dd->buf;
+
+	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
+		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
+			dd->resume = atmel_aes_gcm_tag_init;
+			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
+			return -EINPROGRESS;
 		}
-		if (!err)
-			err = atmel_aes_crypt_dma_start(dd);
-		if (!err)
-			return; /* DMA started. Not fininishing. */
+
+		return atmel_aes_gcm_finalize(dd);
+	}
+
+	/* Read the GCM Intermediate Hash Word Registers. */
+	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
+
+	data[0] = cpu_to_be64(req->assoclen * 8);
+	data[1] = cpu_to_be64(ctx->textlen * 8);
+
+	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
+				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
+}
+
+static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	unsigned long flags;
+
+	/*
+	 * Change mode to CTR to complete the tag generation.
+	 * Use J0 as Initialization Vector.
+	 */
+	flags = dd->flags;
+	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
+	dd->flags |= AES_FLAGS_CTR;
+	atmel_aes_write_ctrl(dd, false, ctx->j0);
+	dd->flags = flags;
+
+	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
+}
+
+static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	bool enc = atmel_aes_is_encrypt(dd);
+	u32 offset, authsize, itag[4], *otag = ctx->tag;
+	int err;
+
+	/* Read the computed tag. */
+	if (likely(dd->flags & AES_FLAGS_GTAGEN))
+		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
+	else
+		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
+
+	offset = req->assoclen + ctx->textlen;
+	authsize = crypto_aead_authsize(tfm);
+	if (enc) {
+		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
+		err = 0;
+	} else {
+		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
+		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
+	}
+
+	return atmel_aes_complete(dd, err);
+}
+
+static int atmel_aes_gcm_crypt(struct aead_request *req,
+			       unsigned long mode)
+{
+	struct atmel_aes_base_ctx *ctx;
+	struct atmel_aes_reqctx *rctx;
+	struct atmel_aes_dev *dd;
+
+	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	ctx->block_size = AES_BLOCK_SIZE;
+
+	dd = atmel_aes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	rctx = aead_request_ctx(req);
+	rctx->mode = AES_FLAGS_GCM | mode;
+
+	return atmel_aes_handle_queue(dd, &req->base);
+}
+
+static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_256 &&
+	    keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_128) {
+		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
+				     unsigned int authsize)
+{
+	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
+	switch (authsize) {
+	case 4:
+	case 8:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int atmel_aes_gcm_encrypt(struct aead_request *req)
+{
+	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_gcm_decrypt(struct aead_request *req)
+{
+	return atmel_aes_gcm_crypt(req, 0);
+}
+
+static int atmel_aes_gcm_init(struct crypto_aead *tfm)
+{
+	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+	ctx->base.start = atmel_aes_gcm_start;
+
+	return 0;
+}
+
+static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
+{
+
+}
+
+static struct aead_alg aes_gcm_alg = {
+	.setkey		= atmel_aes_gcm_setkey,
+	.setauthsize	= atmel_aes_gcm_setauthsize,
+	.encrypt	= atmel_aes_gcm_encrypt,
+	.decrypt	= atmel_aes_gcm_decrypt,
+	.init		= atmel_aes_gcm_init,
+	.exit		= atmel_aes_gcm_exit,
+	.ivsize		= 12,
+	.maxauthsize	= AES_BLOCK_SIZE,
+
+	.base = {
+		.cra_name		= "gcm(aes)",
+		.cra_driver_name	= "atmel-gcm-aes",
+		.cra_priority		= ATMEL_AES_PRIORITY,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+};
+
+
+/* Probe functions */
+
+static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+{
+	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
+	dd->buflen = ATMEL_AES_BUFFER_SIZE;
+	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+	if (!dd->buf) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+{
+	free_page((unsigned long)dd->buf);
+}
+
+static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+{
+	struct at_dma_slave	*sl = slave;
+
+	if (sl && sl->dma_dev == chan->device->dev) {
+		chan->private = sl;
+		return true;
+	} else {
+		return false;
 	}
+}
+
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
+			      struct crypto_platform_data *pdata)
+{
+	struct at_dma_slave *slave;
+	int err = -ENOMEM;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Try to grab 2 DMA channels */
+	slave = &pdata->dma_slave->rxdata;
+	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
+							slave, dd->dev, "tx");
+	if (!dd->src.chan)
+		goto err_dma_in;
+
+	slave = &pdata->dma_slave->txdata;
+	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
+							slave, dd->dev, "rx");
+	if (!dd->dst.chan)
+		goto err_dma_out;
+
+	return 0;
+
+err_dma_out:
+	dma_release_channel(dd->src.chan);
+err_dma_in:
+	dev_warn(dd->dev, "no DMA channel available\n");
+	return err;
+}
+
+static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
+{
+	dma_release_channel(dd->dst.chan);
+	dma_release_channel(dd->src.chan);
+}
+
+static void atmel_aes_queue_task(unsigned long data)
+{
+	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
 
-cpu_end:
-	atmel_aes_finish_req(dd, err);
 	atmel_aes_handle_queue(dd, NULL);
 }
 
+static void atmel_aes_done_task(unsigned long data)
+{
+	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
+
+	dd->is_async = true;
+	(void)dd->resume(dd);
+}
+
 static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
 {
 	struct atmel_aes_dev *aes_dd = dev_id;
@@ -1217,10 +1882,14 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
-		crypto_unregister_alg(&aes_algs[i]);
+	if (dd->caps.has_gcm)
+		crypto_unregister_aead(&aes_gcm_alg);
+
 	if (dd->caps.has_cfb64)
 		crypto_unregister_alg(&aes_cfb64_alg);
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+		crypto_unregister_alg(&aes_algs[i]);
 }
 
 static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -1239,8 +1908,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
 			goto err_aes_cfb64_alg;
 	}
 
+	if (dd->caps.has_gcm) {
+		err = crypto_register_aead(&aes_gcm_alg);
+		if (err)
+			goto err_aes_gcm_alg;
+	}
+
 	return 0;
 
+err_aes_gcm_alg:
+	crypto_unregister_alg(&aes_cfb64_alg);
 err_aes_cfb64_alg:
 	i = ARRAY_SIZE(aes_algs);
 err_aes_algs:
@@ -1254,13 +1931,24 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
 {
 	dd->caps.has_dualbuff = 0;
 	dd->caps.has_cfb64 = 0;
+	dd->caps.has_ctr32 = 0;
+	dd->caps.has_gcm = 0;
 	dd->caps.max_burst_size = 1;
 
 	/* keep only major version number */
 	switch (dd->hw_version & 0xff0) {
+	case 0x500:
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_cfb64 = 1;
+		dd->caps.has_ctr32 = 1;
+		dd->caps.has_gcm = 1;
+		dd->caps.max_burst_size = 4;
+		break;
 	case 0x200:
 		dd->caps.has_dualbuff = 1;
 		dd->caps.has_cfb64 = 1;
+		dd->caps.has_ctr32 = 1;
+		dd->caps.has_gcm = 1;
 		dd->caps.max_burst_size = 4;
 		break;
 	case 0x130:
@@ -1402,7 +2090,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
 		goto res_err;
 	}
 
-	atmel_aes_hw_version_init(aes_dd);
+	err = atmel_aes_hw_version_init(aes_dd);
+	if (err)
+		goto res_err;
 
 	atmel_aes_get_cap(aes_dd);
 
@@ -1423,8 +2113,8 @@ static int atmel_aes_probe(struct platform_device *pdev)
 		goto err_algs;
 
 	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
-			dma_chan_name(aes_dd->dma_lch_in.chan),
-			dma_chan_name(aes_dd->dma_lch_out.chan));
+			dma_chan_name(aes_dd->src.chan),
+			dma_chan_name(aes_dd->dst.chan));
 
 	return 0;
 
@@ -1462,6 +2152,7 @@ static int atmel_aes_remove(struct platform_device *pdev)
 	tasklet_kill(&aes_dd->queue_task);
 
 	atmel_aes_dma_cleanup(aes_dd);
+	atmel_aes_buff_cleanup(aes_dd);
 
 	return 0;
 }

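A note on the J0 handling added in atmel_aes_gcm_start() above: per NIST SP 800-38D, a 96-bit IV is used directly as J0 with the 32-bit counter field set to 1, while any other IV length is zero-padded to a 128-bit boundary, extended with a 64-bit zero and the 64-bit IV bit length, and run through GHASH. The driver offloads that GHASH step to the hardware; the buffer layout it prepares is sketched below in plain C, assuming the usual memcpy/memset and put_unaligned_be64 helpers (build_j0_input() is an illustrative name, not a function in the driver):

	/* Minimal sketch of the GHASH input layout for a non-96-bit IV. */
	static size_t build_j0_input(u8 *buf, const u8 *iv, size_t ivsize)
	{
		size_t padlen = (16 - (ivsize % 16)) % 16;
		size_t datalen = ivsize + padlen + 16;

		memcpy(buf, iv, ivsize);			/* the IV itself */
		memset(buf + ivsize, 0, padlen + 8);		/* zero padding plus 64-bit zero */
		put_unaligned_be64((u64)ivsize * 8,		/* IV length in bits */
				   buf + datalen - 8);
		return datalen;	/* GHASH over these bytes yields J0 */
	}
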
+ 1 - 2
drivers/crypto/atmel-sha.c

@@ -755,7 +755,6 @@ static int atmel_sha_finish(struct ahash_request *req)
 {
 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 	struct atmel_sha_dev *dd = ctx->dd;
-	int err = 0;
 
 	if (ctx->digcnt[0] || ctx->digcnt[1])
 		atmel_sha_copy_ready_hash(req);
@@ -763,7 +762,7 @@ static int atmel_sha_finish(struct ahash_request *req)
 	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
 		ctx->digcnt[0], ctx->bufcnt);
 
-	return err;
+	return 0;
 }
 
 static void atmel_sha_finish_req(struct ahash_request *req, int err)

+ 25 - 1
drivers/crypto/caam/caamhash.c

@@ -803,6 +803,10 @@ static int ahash_update_ctx(struct ahash_request *req)
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
 					     req->nbytes - (*next_buflen));
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
 		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 				 sizeof(struct sec4_sg_entry);
@@ -1002,6 +1006,10 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	int sh_len;
 
 	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
 	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 			 sizeof(struct sec4_sg_entry);
@@ -1086,6 +1094,10 @@ static int ahash_digest(struct ahash_request *req)
 	int sh_len;
 
 	src_nents = sg_count(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
 	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
 	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
@@ -1234,6 +1246,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
 					     req->nbytes - (*next_buflen));
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
 		sec4_sg_bytes = (1 + src_nents) *
 				sizeof(struct sec4_sg_entry);
 
@@ -1342,6 +1358,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	int ret = 0;
 
 	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
 	sec4_sg_src_index = 2;
 	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 			 sizeof(struct sec4_sg_entry);
@@ -1430,6 +1450,10 @@ static int ahash_update_first(struct ahash_request *req)
 
 	if (to_hash) {
 		src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
 		dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
 		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
@@ -1572,7 +1596,7 @@ static int ahash_export(struct ahash_request *req, void *out)
 		len = state->buflen_1;
 	} else {
 		buf = state->buf_0;
-		len = state->buflen_1;
+		len = state->buflen_0;
 	}
 
 	memcpy(export->buf, buf, len);

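The caamhash hunks above all follow the same pattern: sg_nents_for_len() (and sg_count(), the CAAM helper built on top of it) can return a negative errno when the scatterlist is shorter than the requested length, so each call site now bails out before the value is fed into the sec4_sg_bytes size computation. A minimal sketch of the pattern, outside any particular function:

	int nents = sg_nents_for_len(req->src, req->nbytes);
	if (nents < 0)
		return nents;	/* typically -EINVAL: scatterlist shorter than req->nbytes */
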
+ 2 - 0
drivers/crypto/ccp/Kconfig

@@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD
 	depends on CRYPTO_DEV_CCP
 	default m
 	select HW_RANDOM
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
 	help
 	  Provides the interface to use the AMD Cryptographic Coprocessor
 	  which can be used to offload encryption operations such as SHA,

+ 8 - 31
drivers/crypto/ccp/ccp-ops.c

@@ -152,32 +152,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
 };
 
-/* The CCP cannot perform zero-length sha operations so the caller
- * is required to buffer data for the final operation.  However, a
- * sha operation for a message with a total length of zero is valid
- * so known values are required to supply the result.
- */
-static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
-	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
-	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
-	0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
-	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
-	0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
-	0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
-	0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
-};
-
-static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
-	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
-	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
-	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
-	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
-};
-
 static u32 ccp_addr_lo(struct ccp_dma_info *info)
 {
 	return lower_32_bits(info->address + info->offset);
@@ -1391,18 +1365,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		if (sha->msg_bits)
 			return -EINVAL;
 
-		/* A sha operation for a message with a total length of zero,
-		 * return known result.
+		/* The CCP cannot perform zero-length sha operations so the
+		 * caller is required to buffer data for the final operation.
+		 * However, a sha operation for a message with a total length
+		 * of zero is valid so known values are required to supply
+		 * the result.
 		 */
 		switch (sha->type) {
 		case CCP_SHA_TYPE_1:
-			sha_zero = ccp_sha1_zero;
+			sha_zero = sha1_zero_message_hash;
 			break;
 		case CCP_SHA_TYPE_224:
-			sha_zero = ccp_sha224_zero;
+			sha_zero = sha224_zero_message_hash;
 			break;
 		case CCP_SHA_TYPE_256:
-			sha_zero = ccp_sha256_zero;
+			sha_zero = sha256_zero_message_hash;
 			break;
 		default:
 			return -EINVAL;

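The ccp-ops.c hunk above drops the driver's private empty-message digest tables in favour of the shared constants exported by the generic SHA modules, which is presumably why the Kconfig hunk earlier selects CRYPTO_SHA1 and CRYPTO_SHA256. For reference, the declarations this series adds (sketched here, in crypto/sha.h) look like:

	extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
	extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
	extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
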
+ 4 - 4
drivers/crypto/ccp/ccp-pci.c

@@ -44,7 +44,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 {
 	struct ccp_pci *ccp_pci = ccp->dev_specific;
 	struct device *dev = ccp->dev;
-	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct msix_entry msix_entry[MSIX_VECTORS];
 	unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
 	int v, ret;
@@ -86,7 +86,7 @@ e_irq:
 static int ccp_get_msi_irq(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
-	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
 	int ret;
 
 	ret = pci_enable_msi(pdev);
@@ -133,7 +133,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 {
 	struct ccp_pci *ccp_pci = ccp->dev_specific;
 	struct device *dev = ccp->dev;
-	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
 
 	if (ccp_pci->msix_count) {
 		while (ccp_pci->msix_count--)
@@ -149,7 +149,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 static int ccp_find_mmio_area(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
-	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
 	resource_size_t io_len;
 	unsigned long io_flags;
 

+ 2 - 4
drivers/crypto/ccp/ccp-platform.c

@@ -35,8 +35,7 @@ struct ccp_platform {
 static int ccp_get_irq(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
-	struct platform_device *pdev = container_of(dev,
-					struct platform_device, dev);
+	struct platform_device *pdev = to_platform_device(dev);
 	int ret;
 
 	ret = platform_get_irq(pdev, 0);
@@ -78,8 +77,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
-	struct platform_device *pdev = container_of(dev,
-					struct platform_device, dev);
+	struct platform_device *pdev = to_platform_device(dev);
 	struct resource *ior;
 
 	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);

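The ccp-pci.c and ccp-platform.c hunks above are pure cleanups: the open-coded container_of() calls are replaced by the equivalent helpers from the core headers. Roughly, those helpers boil down to (sketch of the definitions in linux/pci.h and linux/platform_device.h):

	#define to_pci_dev(n)		container_of(n, struct pci_dev, dev)
	#define to_platform_device(x)	container_of((x), struct platform_device, dev)
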
+ 207 - 305
drivers/crypto/hifn_795x.c

@@ -11,10 +11,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
@@ -36,14 +32,6 @@
 #include <crypto/algapi.h>
 #include <crypto/des.h>
 
-//#define HIFN_DEBUG
-
-#ifdef HIFN_DEBUG
-#define dprintk(f, a...) 	printk(f, ##a)
-#else
-#define dprintk(f, a...)	do {} while (0)
-#endif
-
 static char hifn_pll_ref[sizeof("extNNN")] = "ext";
 module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
 MODULE_PARM_DESC(hifn_pll_ref,
@@ -79,12 +67,12 @@ static atomic_t hifn_dev_number;
 
 /* DMA registres */
 
-#define HIFN_DMA_CRA 			0x0C	/* DMA Command Ring Address */
-#define HIFN_DMA_SDRA 			0x1C	/* DMA Source Data Ring Address */
+#define HIFN_DMA_CRA			0x0C	/* DMA Command Ring Address */
+#define HIFN_DMA_SDRA			0x1C	/* DMA Source Data Ring Address */
 #define HIFN_DMA_RRA			0x2C	/* DMA Result Ring Address */
 #define HIFN_DMA_DDRA			0x3C	/* DMA Destination Data Ring Address */
 #define HIFN_DMA_STCTL			0x40	/* DMA Status and Control */
-#define HIFN_DMA_INTREN 		0x44	/* DMA Interrupt Enable */
+#define HIFN_DMA_INTREN			0x44	/* DMA Interrupt Enable */
 #define HIFN_DMA_CFG1			0x48	/* DMA Configuration #1 */
 #define HIFN_DMA_CFG2			0x6C	/* DMA Configuration #2 */
 #define HIFN_CHIP_ID			0x98	/* Chip ID */
@@ -358,10 +346,10 @@ static atomic_t hifn_dev_number;
 #define HIFN_NAMESIZE			32
 #define HIFN_MAX_RESULT_ORDER		5
 
-#define	HIFN_D_CMD_RSIZE		24*1
-#define	HIFN_D_SRC_RSIZE		80*1
-#define	HIFN_D_DST_RSIZE		80*1
-#define	HIFN_D_RES_RSIZE		24*1
+#define	HIFN_D_CMD_RSIZE		(24 * 1)
+#define	HIFN_D_SRC_RSIZE		(80 * 1)
+#define	HIFN_D_DST_RSIZE		(80 * 1)
+#define	HIFN_D_RES_RSIZE		(24 * 1)
 
 #define HIFN_D_DST_DALIGN		4
 
@@ -386,17 +374,16 @@ static atomic_t hifn_dev_number;
 #define	HIFN_MAX_RESULT			(8 + 4 + 4 + 20 + 4)
 #define HIFN_USED_RESULT		12
 
-struct hifn_desc
-{
+struct hifn_desc {
 	volatile __le32		l;
 	volatile __le32		p;
 };
 
 struct hifn_dma {
-	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE+1];
-	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE+1];
-	struct hifn_desc	dstr[HIFN_D_DST_RSIZE+1];
-	struct hifn_desc	resr[HIFN_D_RES_RSIZE+1];
+	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE + 1];
+	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE + 1];
+	struct hifn_desc	dstr[HIFN_D_DST_RSIZE + 1];
+	struct hifn_desc	resr[HIFN_D_RES_RSIZE + 1];
 
 	u8			command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
 	u8			result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
@@ -410,16 +397,15 @@ struct hifn_dma {
 	int			cmdk, srck, dstk, resk;
 };
 
-#define HIFN_FLAG_CMD_BUSY	(1<<0)
-#define HIFN_FLAG_SRC_BUSY	(1<<1)
-#define HIFN_FLAG_DST_BUSY	(1<<2)
-#define HIFN_FLAG_RES_BUSY	(1<<3)
-#define HIFN_FLAG_OLD_KEY	(1<<4)
+#define HIFN_FLAG_CMD_BUSY	(1 << 0)
+#define HIFN_FLAG_SRC_BUSY	(1 << 1)
+#define HIFN_FLAG_DST_BUSY	(1 << 2)
+#define HIFN_FLAG_RES_BUSY	(1 << 3)
+#define HIFN_FLAG_OLD_KEY	(1 << 4)
 
 #define HIFN_DEFAULT_ACTIVE_NUM	5
 
-struct hifn_device
-{
+struct hifn_device {
 	char			name[HIFN_NAMESIZE];
 
 	int			irq;
@@ -432,7 +418,7 @@ struct hifn_device
 
 	u32			dmareg;
 
-	void 			*sa[HIFN_D_RES_RSIZE];
+	void			*sa[HIFN_D_RES_RSIZE];
 
 	spinlock_t		lock;
 
@@ -447,7 +433,7 @@ struct hifn_device
 
 	struct tasklet_struct	tasklet;
 
-	struct crypto_queue 	queue;
+	struct crypto_queue	queue;
 	struct list_head	alg_list;
 
 	unsigned int		pk_clk_freq;
@@ -468,8 +454,7 @@ struct hifn_device
 #define	HIFN_D_JUMP			0x40000000
 #define	HIFN_D_VALID			0x80000000
 
-struct hifn_base_command
-{
+struct hifn_base_command {
 	volatile __le16		masks;
 	volatile __le16		session_num;
 	volatile __le16		total_source_count;
@@ -491,12 +476,11 @@ struct hifn_base_command
 /*
  * Structure to help build up the command data structure.
  */
-struct hifn_crypt_command
-{
-	volatile __le16 		masks;
-	volatile __le16 		header_skip;
-	volatile __le16 		source_count;
-	volatile __le16 		reserved;
+struct hifn_crypt_command {
+	volatile __le16		masks;
+	volatile __le16		header_skip;
+	volatile __le16		source_count;
+	volatile __le16		reserved;
 };
 
 #define	HIFN_CRYPT_CMD_ALG_MASK		0x0003		/* algorithm: */
@@ -522,12 +506,11 @@ struct hifn_crypt_command
 /*
  * Structure to help build up the command data structure.
  */
-struct hifn_mac_command
-{
-	volatile __le16 	masks;
-	volatile __le16 	header_skip;
-	volatile __le16 	source_count;
-	volatile __le16 	reserved;
+struct hifn_mac_command {
+	volatile __le16	masks;
+	volatile __le16	header_skip;
+	volatile __le16	source_count;
+	volatile __le16	reserved;
 };
 
 #define	HIFN_MAC_CMD_ALG_MASK		0x0001
@@ -551,12 +534,11 @@ struct hifn_mac_command
 #define	HIFN_MAC_CMD_POS_IPSEC		0x0200
 #define	HIFN_MAC_CMD_NEW_KEY		0x0800
 
-struct hifn_comp_command
-{
-	volatile __le16 	masks;
-	volatile __le16 	header_skip;
-	volatile __le16 	source_count;
-	volatile __le16 	reserved;
+struct hifn_comp_command {
+	volatile __le16		masks;
+	volatile __le16		header_skip;
+	volatile __le16		source_count;
+	volatile __le16		reserved;
 };
 
 #define	HIFN_COMP_CMD_SRCLEN_M		0xc000
@@ -570,12 +552,11 @@ struct hifn_comp_command
 #define	HIFN_COMP_CMD_ALG_MPPC		0x0001	/*   MPPC */
 #define	HIFN_COMP_CMD_ALG_LZS		0x0000	/*   LZS */
 
-struct hifn_base_result
-{
-	volatile __le16 	flags;
-	volatile __le16 	session;
-	volatile __le16 	src_cnt;		/* 15:0 of source count */
-	volatile __le16 	dst_cnt;		/* 15:0 of dest count */
+struct hifn_base_result {
+	volatile __le16		flags;
+	volatile __le16		session;
+	volatile __le16		src_cnt;		/* 15:0 of source count */
+	volatile __le16		dst_cnt;		/* 15:0 of dest count */
 };
 
 #define	HIFN_BASE_RES_DSTOVERRUN	0x0200	/* destination overrun */
@@ -584,8 +565,7 @@ struct hifn_base_result
 #define	HIFN_BASE_RES_DSTLEN_M		0x3000	/* 17:16 of dest count */
 #define	HIFN_BASE_RES_DSTLEN_S		12
 
-struct hifn_comp_result
-{
+struct hifn_comp_result {
 	volatile __le16		flags;
 	volatile __le16		crc;
 };
@@ -596,18 +576,16 @@ struct hifn_comp_result
 #define	HIFN_COMP_RES_ENDMARKER		0x0002	/* LZS: end marker seen */
 #define	HIFN_COMP_RES_SRC_NOTZERO	0x0001	/* source expired */
 
-struct hifn_mac_result
-{
-	volatile __le16 	flags;
-	volatile __le16 	reserved;
+struct hifn_mac_result {
+	volatile __le16		flags;
+	volatile __le16		reserved;
 	/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
 };
 
 #define	HIFN_MAC_RES_MISCOMPARE		0x0002	/* compare failed */
 #define	HIFN_MAC_RES_SRC_NOTZERO	0x0001	/* source expired */
 
-struct hifn_crypt_result
-{
+struct hifn_crypt_result {
 	volatile __le16		flags;
 	volatile __le16		reserved;
 };
@@ -622,11 +600,10 @@ struct hifn_crypt_result
 #define	HIFN_POLL_SCALAR	0x0
 #endif
 
-#define	HIFN_MAX_SEGLEN 	0xffff		/* maximum dma segment len */
+#define	HIFN_MAX_SEGLEN		0xffff		/* maximum dma segment len */
 #define	HIFN_MAX_DMALEN		0x3ffff		/* maximum dma length */
 
-struct hifn_crypto_alg
-{
+struct hifn_crypto_alg {
 	struct list_head	entry;
 	struct crypto_alg	alg;
 	struct hifn_device	*dev;
@@ -634,24 +611,21 @@ struct hifn_crypto_alg
 
 #define ASYNC_SCATTERLIST_CACHE	16
 
-#define ASYNC_FLAGS_MISALIGNED	(1<<0)
+#define ASYNC_FLAGS_MISALIGNED	(1 << 0)
 
-struct hifn_cipher_walk
-{
+struct hifn_cipher_walk {
 	struct scatterlist	cache[ASYNC_SCATTERLIST_CACHE];
 	u32			flags;
 	u32			flags;
 	int			num;
 	int			num;
 };
 
 
-struct hifn_context
-{
+struct hifn_context {
 	u8			key[HIFN_MAX_CRYPT_KEY_LENGTH];
 	u8			key[HIFN_MAX_CRYPT_KEY_LENGTH];
 	struct hifn_device	*dev;
 	struct hifn_device	*dev;
 	unsigned int		keysize;
 };
 };
 
 
-struct hifn_request_context
-{
+struct hifn_request_context {
 	u8			*iv;
 	u8			*iv;
 	unsigned int		ivsize;
 	unsigned int		ivsize;
 	u8			op, type, mode, unused;
 	u8			op, type, mode, unused;
@@ -693,7 +667,7 @@ static void hifn_wait_puc(struct hifn_device *dev)
 	int i;
 	int i;
 	u32 ret;
 	u32 ret;
 
 
-	for (i=10000; i > 0; --i) {
+	for (i = 10000; i > 0; --i) {
 		ret = hifn_read_0(dev, HIFN_0_PUCTRL);
 		ret = hifn_read_0(dev, HIFN_0_PUCTRL);
 		if (!(ret & HIFN_PUCTRL_RESET))
 		if (!(ret & HIFN_PUCTRL_RESET))
 			break;
 			break;
@@ -702,7 +676,7 @@ static void hifn_wait_puc(struct hifn_device *dev)
 	}
 	}
 
 
 	if (!i)
 	if (!i)
-		dprintk("%s: Failed to reset PUC unit.\n", dev->name);
+		dev_err(&dev->pdev->dev, "Failed to reset PUC unit.\n");
 }
 }
 
 
 static void hifn_reset_puc(struct hifn_device *dev)
 static void hifn_reset_puc(struct hifn_device *dev)
@@ -749,13 +723,12 @@ static void hifn_reset_dma(struct hifn_device *dev, int full)
 	hifn_reset_puc(dev);
 	hifn_reset_puc(dev);
 }
 }
 
 
-static u32 hifn_next_signature(u_int32_t a, u_int cnt)
+static u32 hifn_next_signature(u32 a, u_int cnt)
 {
 {
 	int i;
 	int i;
 	u32 v;
 	u32 v;
 
 
 	for (i = 0; i < cnt; i++) {
 	for (i = 0; i < cnt; i++) {
-
 		/* get the parity */
 		/* get the parity */
 		v = a & 0x80080125;
 		v = a & 0x80080125;
 		v ^= v >> 16;
 		v ^= v >> 16;
@@ -846,33 +819,28 @@ static int hifn_init_pubrng(struct hifn_device *dev)
 	hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
 	hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
 			HIFN_PUBRST_RESET);
 			HIFN_PUBRST_RESET);
 
 
-	for (i=100; i > 0; --i) {
+	for (i = 100; i > 0; --i) {
 		mdelay(1);
 		mdelay(1);
 
 
 		if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
 		if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
 			break;
 			break;
 	}
 	}
 
 
-	if (!i)
-		dprintk("Chip %s: Failed to initialise public key engine.\n",
-				dev->name);
-	else {
+	if (!i) {
+		dev_err(&dev->pdev->dev, "Failed to initialise public key engine.\n");
+	} else {
 		hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
 		hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
 		dev->dmareg |= HIFN_DMAIER_PUBDONE;
 		dev->dmareg |= HIFN_DMAIER_PUBDONE;
 		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
 		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
 
 
-		dprintk("Chip %s: Public key engine has been successfully "
-				"initialised.\n", dev->name);
+		dev_dbg(&dev->pdev->dev, "Public key engine has been successfully initialised.\n");
 	}
 	}
 
 
-	/*
-	 * Enable RNG engine.
-	 */
+	/* Enable RNG engine. */
 
 
 	hifn_write_1(dev, HIFN_1_RNG_CONFIG,
 	hifn_write_1(dev, HIFN_1_RNG_CONFIG,
 			hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
 			hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
-	dprintk("Chip %s: RNG engine has been successfully initialised.\n",
-			dev->name);
+	dev_dbg(&dev->pdev->dev, "RNG engine has been successfully initialised.\n");
 
 
 #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
 #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
 	/* First value must be discarded */
 	/* First value must be discarded */
@@ -896,8 +864,8 @@ static int hifn_enable_crypto(struct hifn_device *dev)
 		}
 		}
 	}
 	}
 
 
-	if (offtbl == NULL) {
-		dprintk("Chip %s: Unknown card!\n", dev->name);
+	if (!offtbl) {
+		dev_err(&dev->pdev->dev, "Unknown card!\n");
 		return -ENODEV;
 		return -ENODEV;
 	}
 	}
 
 
@@ -912,7 +880,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
 	hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
 	hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
 	mdelay(1);
 	mdelay(1);
 
 
-	for (i=0; i<12; ++i) {
+	for (i = 0; i < 12; ++i) {
 		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
 		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
 		hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
 		hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
 
 
@@ -920,7 +888,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
 	}
 	}
 	hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
 	hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
 
 
-	dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));
+	dev_dbg(&dev->pdev->dev, "%s %s.\n", dev->name, pci_name(dev->pdev));
 
 
 	return 0;
 	return 0;
 }
 }
@@ -931,16 +899,14 @@ static void hifn_init_dma(struct hifn_device *dev)
 	u32 dptr = dev->desc_dma;
 	u32 dptr = dev->desc_dma;
 	int i;
 	int i;
 
 
-	for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
+	for (i = 0; i < HIFN_D_CMD_RSIZE; ++i)
 		dma->cmdr[i].p = __cpu_to_le32(dptr +
 		dma->cmdr[i].p = __cpu_to_le32(dptr +
 				offsetof(struct hifn_dma, command_bufs[i][0]));
 				offsetof(struct hifn_dma, command_bufs[i][0]));
-	for (i=0; i<HIFN_D_RES_RSIZE; ++i)
+	for (i = 0; i < HIFN_D_RES_RSIZE; ++i)
 		dma->resr[i].p = __cpu_to_le32(dptr +
 		dma->resr[i].p = __cpu_to_le32(dptr +
 				offsetof(struct hifn_dma, result_bufs[i][0]));
 				offsetof(struct hifn_dma, result_bufs[i][0]));
 
 
-	/*
-	 * Setup LAST descriptors.
-	 */
+	/* Setup LAST descriptors. */
 	dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
 	dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
 			offsetof(struct hifn_dma, cmdr[0]));
 			offsetof(struct hifn_dma, cmdr[0]));
 	dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
 	dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
@@ -960,7 +926,7 @@ static void hifn_init_dma(struct hifn_device *dev)
  * to calculate the optimal multiplier. For PCI we assume 66MHz, since that
  * to calculate the optimal multiplier. For PCI we assume 66MHz, since that
  * allows us to operate without the risk of overclocking the chip. If it
  * allows us to operate without the risk of overclocking the chip. If it
  * actually uses 33MHz, the chip will operate at half the speed, this can be
  * actually uses 33MHz, the chip will operate at half the speed, this can be
- * overriden by specifying the frequency as module parameter (pci33).
+ * overridden by specifying the frequency as module parameter (pci33).
  *
  *
  * Unfortunately the PCI clock is not very suitable since the HIFN needs a
  * Unfortunately the PCI clock is not very suitable since the HIFN needs a
  * stable clock and the PCI clock frequency may vary, so the default is the
  * stable clock and the PCI clock frequency may vary, so the default is the
@@ -984,9 +950,8 @@ static void hifn_init_pll(struct hifn_device *dev)
 		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
 		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
 	else {
 	else {
 		freq = 66;
 		freq = 66;
-		printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, "
-				 "override with hifn_pll_ref=%.3s<frequency>\n",
-		       freq, hifn_pll_ref);
+		dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
+			 freq, hifn_pll_ref);
 	}
 	}
 
 
 	m = HIFN_PLL_FCK_MAX / freq;
 	m = HIFN_PLL_FCK_MAX / freq;
@@ -1174,17 +1139,17 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
 
 
 	mask = 0;
 	mask = 0;
 	switch (rctx->op) {
 	switch (rctx->op) {
-		case ACRYPTO_OP_DECRYPT:
-			mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
-			break;
-		case ACRYPTO_OP_ENCRYPT:
-			mask = HIFN_BASE_CMD_CRYPT;
-			break;
-		case ACRYPTO_OP_HMAC:
-			mask = HIFN_BASE_CMD_MAC;
-			break;
-		default:
-			goto err_out;
+	case ACRYPTO_OP_DECRYPT:
+		mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
+		break;
+	case ACRYPTO_OP_ENCRYPT:
+		mask = HIFN_BASE_CMD_CRYPT;
+		break;
+	case ACRYPTO_OP_HMAC:
+		mask = HIFN_BASE_CMD_MAC;
+		break;
+	default:
+		goto err_out;
 	}
 	}
 
 
 	buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
 	buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
@@ -1199,53 +1164,53 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
 			md |= HIFN_CRYPT_CMD_NEW_IV;
 			md |= HIFN_CRYPT_CMD_NEW_IV;
 
 
 		switch (rctx->mode) {
 		switch (rctx->mode) {
-			case ACRYPTO_MODE_ECB:
-				md |= HIFN_CRYPT_CMD_MODE_ECB;
-				break;
-			case ACRYPTO_MODE_CBC:
-				md |= HIFN_CRYPT_CMD_MODE_CBC;
-				break;
-			case ACRYPTO_MODE_CFB:
-				md |= HIFN_CRYPT_CMD_MODE_CFB;
-				break;
-			case ACRYPTO_MODE_OFB:
-				md |= HIFN_CRYPT_CMD_MODE_OFB;
-				break;
-			default:
-				goto err_out;
+		case ACRYPTO_MODE_ECB:
+			md |= HIFN_CRYPT_CMD_MODE_ECB;
+			break;
+		case ACRYPTO_MODE_CBC:
+			md |= HIFN_CRYPT_CMD_MODE_CBC;
+			break;
+		case ACRYPTO_MODE_CFB:
+			md |= HIFN_CRYPT_CMD_MODE_CFB;
+			break;
+		case ACRYPTO_MODE_OFB:
+			md |= HIFN_CRYPT_CMD_MODE_OFB;
+			break;
+		default:
+			goto err_out;
 		}
 		}
 
 
 		switch (rctx->type) {
 		switch (rctx->type) {
-			case ACRYPTO_TYPE_AES_128:
-				if (ctx->keysize != 16)
-					goto err_out;
-				md |= HIFN_CRYPT_CMD_KSZ_128 |
-					HIFN_CRYPT_CMD_ALG_AES;
-				break;
-			case ACRYPTO_TYPE_AES_192:
-				if (ctx->keysize != 24)
-					goto err_out;
-				md |= HIFN_CRYPT_CMD_KSZ_192 |
-					HIFN_CRYPT_CMD_ALG_AES;
-				break;
-			case ACRYPTO_TYPE_AES_256:
-				if (ctx->keysize != 32)
-					goto err_out;
-				md |= HIFN_CRYPT_CMD_KSZ_256 |
-					HIFN_CRYPT_CMD_ALG_AES;
-				break;
-			case ACRYPTO_TYPE_3DES:
-				if (ctx->keysize != 24)
-					goto err_out;
-				md |= HIFN_CRYPT_CMD_ALG_3DES;
-				break;
-			case ACRYPTO_TYPE_DES:
-				if (ctx->keysize != 8)
-					goto err_out;
-				md |= HIFN_CRYPT_CMD_ALG_DES;
-				break;
-			default:
+		case ACRYPTO_TYPE_AES_128:
+			if (ctx->keysize != 16)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_KSZ_128 |
+				HIFN_CRYPT_CMD_ALG_AES;
+			break;
+		case ACRYPTO_TYPE_AES_192:
+			if (ctx->keysize != 24)
 				goto err_out;
 				goto err_out;
+			md |= HIFN_CRYPT_CMD_KSZ_192 |
+				HIFN_CRYPT_CMD_ALG_AES;
+			break;
+		case ACRYPTO_TYPE_AES_256:
+			if (ctx->keysize != 32)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_KSZ_256 |
+				HIFN_CRYPT_CMD_ALG_AES;
+			break;
+		case ACRYPTO_TYPE_3DES:
+			if (ctx->keysize != 24)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_ALG_3DES;
+			break;
+		case ACRYPTO_TYPE_DES:
+			if (ctx->keysize != 8)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_ALG_DES;
+			break;
+		default:
+			goto err_out;
 		}
 		}
 
 
 		buf_pos += hifn_setup_crypto_command(dev, buf_pos,
 		buf_pos += hifn_setup_crypto_command(dev, buf_pos,
@@ -1265,8 +1230,9 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
 			HIFN_D_VALID | HIFN_D_LAST |
 			HIFN_D_VALID | HIFN_D_LAST |
 			HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
 			HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
 		dma->cmdi = 0;
 		dma->cmdi = 0;
-	} else
-		dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);
+	} else {
+		dma->cmdr[dma->cmdi - 1].l |= __cpu_to_le32(HIFN_D_VALID);
+	}
 
 
 	if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
 	if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
 		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
 		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
@@ -1424,7 +1390,7 @@ static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
 	sg_init_table(w->cache, num);
 	sg_init_table(w->cache, num);
 
 
 	w->num = 0;
 	w->num = 0;
-	for (i=0; i<num; ++i) {
+	for (i = 0; i < num; ++i) {
 		struct page *page = alloc_page(gfp_flags);
 		struct page *page = alloc_page(gfp_flags);
 		struct scatterlist *s;
 		struct scatterlist *s;
 
 
@@ -1444,7 +1410,7 @@ static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
 {
 {
 	int i;
 	int i;
 
 
-	for (i=0; i<w->num; ++i) {
+	for (i = 0; i < w->num; ++i) {
 		struct scatterlist *s = &w->cache[i];
 		struct scatterlist *s = &w->cache[i];
 
 
 		__free_page(sg_page(s));
 		__free_page(sg_page(s));
@@ -1471,8 +1437,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
 		drest -= copy;
 		drest -= copy;
 		nbytes -= copy;
 		nbytes -= copy;
 
 
-		dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
-				__func__, copy, size, drest, nbytes);
+		pr_debug("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
+			 __func__, copy, size, drest, nbytes);
 
 
 		dst++;
 		dst++;
 		idx++;
 		idx++;
@@ -1499,8 +1465,8 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
 
 
 		dst = &req->dst[idx];
 		dst = &req->dst[idx];
 
 
-		dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n",
-			__func__, dst->length, dst->offset, offset, nbytes);
+		pr_debug("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n",
+			 __func__, dst->length, dst->offset, offset, nbytes);
 
 
 		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
 		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
 		    !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
 		    !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
@@ -1525,10 +1491,10 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
 				 * to put there additional blocksized chunk,
 				 * to put there additional blocksized chunk,
 				 * so we mark that page as containing only
 				 * so we mark that page as containing only
 				 * blocksize aligned chunks:
 				 * blocksize aligned chunks:
-				 * 	t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
+				 *	t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
 				 * and increase number of bytes to be processed
 				 * and increase number of bytes to be processed
 				 * in next chunk:
 				 * in next chunk:
-				 * 	nbytes += diff;
+				 *	nbytes += diff;
 				 */
 				 */
 				nbytes += diff;
 				nbytes += diff;
 
 
@@ -1536,14 +1502,13 @@ static int hifn_cipher_walk(struct ablkcipher_request *req,
 				 * Temporary of course...
 				 * Temporary of course...
 				 * Kick author if you will catch this one.
 				 * Kick author if you will catch this one.
 				 */
 				 */
-				printk(KERN_ERR "%s: dlen: %u, nbytes: %u,"
-					"slen: %u, offset: %u.\n",
-					__func__, dlen, nbytes, slen, offset);
-				printk(KERN_ERR "%s: please contact author to fix this "
-					"issue, generally you should not catch "
-					"this path under any condition but who "
-					"knows how did you use crypto code.\n"
-					"Thank you.\n",	__func__);
+				pr_err("%s: dlen: %u, nbytes: %u, slen: %u, offset: %u.\n",
+				       __func__, dlen, nbytes, slen, offset);
+				pr_err("%s: please contact author to fix this "
+				       "issue, generally you should not catch "
+				       "this path under any condition but who "
+				       "knows how did you use crypto code.\n"
+				       "Thank you.\n",	__func__);
 				BUG();
 				BUG();
 			} else {
 			} else {
 				copy += diff + nbytes;
 				copy += diff + nbytes;
@@ -1630,70 +1595,16 @@ err_out:
 	spin_unlock_irqrestore(&dev->lock, flags);
 	spin_unlock_irqrestore(&dev->lock, flags);
 err_out_exit:
 err_out_exit:
 	if (err) {
 	if (err) {
-		printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
-				"type: %u, err: %d.\n",
-			dev->name, rctx->iv, rctx->ivsize,
-			ctx->key, ctx->keysize,
-			rctx->mode, rctx->op, rctx->type, err);
+		dev_info(&dev->pdev->dev, "iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
+			 "type: %u, err: %d.\n",
+			 rctx->iv, rctx->ivsize,
+			 ctx->key, ctx->keysize,
+			 rctx->mode, rctx->op, rctx->type, err);
 	}
 	}
 
 
 	return err;
 	return err;
 }
 }
 
 
-static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
-{
-	int n, err;
-	u8 src[16];
-	struct hifn_context ctx;
-	struct hifn_request_context rctx;
-	u8 fips_aes_ecb_from_zero[16] = {
-		0x66, 0xE9, 0x4B, 0xD4,
-		0xEF, 0x8A, 0x2C, 0x3B,
-		0x88, 0x4C, 0xFA, 0x59,
-		0xCA, 0x34, 0x2B, 0x2E};
-	struct scatterlist sg;
-
-	memset(src, 0, sizeof(src));
-	memset(ctx.key, 0, sizeof(ctx.key));
-
-	ctx.dev = dev;
-	ctx.keysize = 16;
-	rctx.ivsize = 0;
-	rctx.iv = NULL;
-	rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
-	rctx.mode = ACRYPTO_MODE_ECB;
-	rctx.type = ACRYPTO_TYPE_AES_128;
-	rctx.walk.cache[0].length = 0;
-
-	sg_init_one(&sg, &src, sizeof(src));
-
-	err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL);
-	if (err)
-		goto err_out;
-
-	dev->started = 0;
-	msleep(200);
-
-	dprintk("%s: decoded: ", dev->name);
-	for (n=0; n<sizeof(src); ++n)
-		dprintk("%02x ", src[n]);
-	dprintk("\n");
-	dprintk("%s: FIPS   : ", dev->name);
-	for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
-		dprintk("%02x ", fips_aes_ecb_from_zero[n]);
-	dprintk("\n");
-
-	if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
-		printk(KERN_INFO "%s: AES 128 ECB test has been successfully "
-				"passed.\n", dev->name);
-		return 0;
-	}
-
-err_out:
-	printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name);
-	return -1;
-}
-
 static int hifn_start_device(struct hifn_device *dev)
 static int hifn_start_device(struct hifn_device *dev)
 {
 {
 	int err;
 	int err;
@@ -1739,8 +1650,8 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
 		saddr += copy;
 		saddr += copy;
 		offset = 0;
 		offset = 0;
 
 
-		dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
-				__func__, copy, size, srest, nbytes);
+		pr_debug("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
+			 __func__, copy, size, srest, nbytes);
 
 
 		dst++;
 		dst++;
 		idx++;
 		idx++;
@@ -1760,7 +1671,8 @@ static inline void hifn_complete_sa(struct hifn_device *dev, int i)
 	dev->sa[i] = NULL;
 	dev->sa[i] = NULL;
 	dev->started--;
 	dev->started--;
 	if (dev->started < 0)
 	if (dev->started < 0)
-		printk("%s: started: %d.\n", __func__, dev->started);
+		dev_info(&dev->pdev->dev, "%s: started: %d.\n", __func__,
+			 dev->started);
 	spin_unlock_irqrestore(&dev->lock, flags);
 	spin_unlock_irqrestore(&dev->lock, flags);
 	BUG_ON(dev->started < 0);
 	BUG_ON(dev->started < 0);
 }
 }
@@ -1779,7 +1691,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
 			t = &rctx->walk.cache[idx];
 			t = &rctx->walk.cache[idx];
 			dst = &req->dst[idx];
 			dst = &req->dst[idx];
 
 
-			dprintk("\n%s: sg_page(t): %p, t->length: %u, "
+			pr_debug("\n%s: sg_page(t): %p, t->length: %u, "
 				"sg_page(dst): %p, dst->length: %u, "
 				"sg_page(dst): %p, dst->length: %u, "
 				"nbytes: %u.\n",
 				"nbytes: %u.\n",
 				__func__, sg_page(t), t->length,
 				__func__, sg_page(t), t->length,
@@ -1815,9 +1727,8 @@ static void hifn_clear_rings(struct hifn_device *dev, int error)
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int i, u;
 	int i, u;
 
 
-	dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
+	dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
 			"k: %d.%d.%d.%d.\n",
 			"k: %d.%d.%d.%d.\n",
-			dev->name,
 			dma->cmdi, dma->srci, dma->dsti, dma->resi,
 			dma->cmdi, dma->srci, dma->dsti, dma->resi,
 			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
 			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
 			dma->cmdk, dma->srck, dma->dstk, dma->resk);
 			dma->cmdk, dma->srck, dma->dstk, dma->resk);
@@ -1870,9 +1781,8 @@ static void hifn_clear_rings(struct hifn_device *dev, int error)
 	}
 	}
 	dma->dstk = i; dma->dstu = u;
 	dma->dstk = i; dma->dstu = u;
 
 
-	dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
+	dev_dbg(&dev->pdev->dev, "ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
 			"k: %d.%d.%d.%d.\n",
 			"k: %d.%d.%d.%d.\n",
-			dev->name,
 			dma->cmdi, dma->srci, dma->dsti, dma->resi,
 			dma->cmdi, dma->srci, dma->dsti, dma->resi,
 			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
 			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
 			dma->cmdk, dma->srck, dma->dstk, dma->resk);
 			dma->cmdk, dma->srck, dma->dstk, dma->resk);
@@ -1921,21 +1831,22 @@ static void hifn_work(struct work_struct *work)
 			int i;
 			int i;
 			struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 			struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 
 
-			printk("%s: r: %08x, active: %d, started: %d, "
-				"success: %lu: qlen: %u/%u, reset: %d.\n",
-				dev->name, r, dev->active, dev->started,
-				dev->success, dev->queue.qlen, dev->queue.max_qlen,
-				reset);
+			dev_info(&dev->pdev->dev,
+				 "r: %08x, active: %d, started: %d, "
+				 "success: %lu: qlen: %u/%u, reset: %d.\n",
+				 r, dev->active, dev->started,
+				 dev->success, dev->queue.qlen, dev->queue.max_qlen,
+				 reset);
 
 
-			printk("%s: res: ", __func__);
-			for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
-				printk("%x.%p ", dma->resr[i].l, dev->sa[i]);
+			dev_info(&dev->pdev->dev, "%s: res: ", __func__);
+			for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
+				pr_info("%x.%p ", dma->resr[i].l, dev->sa[i]);
 				if (dev->sa[i]) {
 				if (dev->sa[i]) {
 					hifn_process_ready(dev->sa[i], -ENODEV);
 					hifn_process_ready(dev->sa[i], -ENODEV);
 					hifn_complete_sa(dev, i);
 					hifn_complete_sa(dev, i);
 				}
 				}
 			}
 			}
-			printk("\n");
+			pr_info("\n");
 
 
 			hifn_reset_dma(dev, 1);
 			hifn_reset_dma(dev, 1);
 			hifn_stop_device(dev);
 			hifn_stop_device(dev);
@@ -1957,9 +1868,9 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 
 
 	dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
 	dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
 
 
-	dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
+	dev_dbg(&dev->pdev->dev, "1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
 			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
 			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
-		dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
+		dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
 		dma->cmdi, dma->srci, dma->dsti, dma->resi,
 		dma->cmdi, dma->srci, dma->dsti, dma->resi,
 		dma->cmdu, dma->srcu, dma->dstu, dma->resu);
 		dma->cmdu, dma->srcu, dma->dstu, dma->resu);
 
 
@@ -1978,9 +1889,9 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	if (restart) {
 	if (restart) {
 		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
 		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
 
 
-		printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
-			dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
-			!!(dmacsr & HIFN_DMACSR_D_OVER),
+		dev_warn(&dev->pdev->dev, "overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
+			 !!(dmacsr & HIFN_DMACSR_R_OVER),
+			 !!(dmacsr & HIFN_DMACSR_D_OVER),
 			puisr, !!(puisr & HIFN_PUISR_DSTOVER));
 			puisr, !!(puisr & HIFN_PUISR_DSTOVER));
 		if (!!(puisr & HIFN_PUISR_DSTOVER))
 		if (!!(puisr & HIFN_PUISR_DSTOVER))
 			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
 			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
@@ -1991,18 +1902,18 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
 	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
 			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
 			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
 	if (restart) {
 	if (restart) {
-		printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
-			dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
-			!!(dmacsr & HIFN_DMACSR_S_ABORT),
-			!!(dmacsr & HIFN_DMACSR_D_ABORT),
-			!!(dmacsr & HIFN_DMACSR_R_ABORT));
+		dev_warn(&dev->pdev->dev, "abort: c: %d, s: %d, d: %d, r: %d.\n",
+			 !!(dmacsr & HIFN_DMACSR_C_ABORT),
+			 !!(dmacsr & HIFN_DMACSR_S_ABORT),
+			 !!(dmacsr & HIFN_DMACSR_D_ABORT),
+			 !!(dmacsr & HIFN_DMACSR_R_ABORT));
 		hifn_reset_dma(dev, 1);
 		hifn_init_dma(dev);
 		hifn_init_registers(dev);
 	}
 
 	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
-		dprintk("%s: wait on command.\n", dev->name);
+		dev_dbg(&dev->pdev->dev, "wait on command.\n");
 		dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
 		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
 	}
@@ -2020,19 +1931,19 @@ static void hifn_flush(struct hifn_device *dev)
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int i;
 
-	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
+	for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
 		struct hifn_desc *d = &dma->resr[i];
 
 		if (dev->sa[i]) {
 			hifn_process_ready(dev->sa[i],
-				(d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0);
+				(d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0);
 			hifn_complete_sa(dev, i);
 		}
 	}
 
 	spin_lock_irqsave(&dev->lock, flags);
 	while ((async_req = crypto_dequeue_request(&dev->queue))) {
-		req = container_of(async_req, struct ablkcipher_request, base);
+		req = ablkcipher_request_cast(async_req);
 		spin_unlock_irqrestore(&dev->lock, flags);
 
 		hifn_process_ready(req, -ENODEV);
@@ -2057,7 +1968,7 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	if (len == HIFN_DES_KEY_LENGTH) {
 		u32 tmp[DES_EXPKEY_WORDS];
 		int ret = des_ekey(tmp, key);
-		
+
 		if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
 			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 			return -EINVAL;
@@ -2151,7 +2062,7 @@ static int hifn_process_queue(struct hifn_device *dev)
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
 
-		req = container_of(async_req, struct ablkcipher_request, base);
+		req = ablkcipher_request_cast(async_req);
 
 		err = hifn_handle_req(req);
 		if (err)
@@ -2298,9 +2209,7 @@ static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
 			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
 }
 
-/*
- * 3DES decryption functions.
- */
+/* 3DES decryption functions. */
 static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
 {
 	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
@@ -2322,8 +2231,7 @@ static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
 			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
 }
 
-struct hifn_alg_template
-{
+struct hifn_alg_template {
 	char name[CRYPTO_MAX_ALG_NAME];
 	char drv_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int bsize;
@@ -2483,7 +2391,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
 	struct hifn_crypto_alg *alg;
 	int err;
 
-	alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
+	alg = kzalloc(sizeof(*alg), GFP_KERNEL);
 	if (!alg)
 		return -ENOMEM;
 
@@ -2530,7 +2438,7 @@ static int hifn_register_alg(struct hifn_device *dev)
 {
 	int i, err;
 
-	for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) {
+	for (i = 0; i < ARRAY_SIZE(hifn_alg_templates); ++i) {
 		err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
 		if (err)
 			goto err_out_exit;
@@ -2575,7 +2483,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_out_disable_pci_device;
 
 	snprintf(name, sizeof(name), "hifn%d",
-			atomic_inc_return(&hifn_dev_number)-1);
+			atomic_inc_return(&hifn_dev_number) - 1);
 
 	err = pci_request_regions(pdev, name);
 	if (err)
@@ -2584,8 +2492,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
 	    pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
 	    pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
-		dprintk("%s: Broken hardware - I/O regions are too small.\n",
-				pci_name(pdev));
+		dev_err(&pdev->dev, "Broken hardware - I/O regions are too small.\n");
 		err = -ENODEV;
 		goto err_out_free_regions;
 	}
@@ -2602,7 +2509,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	snprintf(dev->name, sizeof(dev->name), "%s", name);
 	spin_lock_init(&dev->lock);
 
-	for (i=0; i<3; ++i) {
+	for (i = 0; i < 3; ++i) {
 		unsigned long addr, size;
 
 		addr = pci_resource_start(pdev, i);
@@ -2618,7 +2525,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma),
 					       &dev->desc_dma);
 	if (!dev->desc_virt) {
-		dprintk("Failed to allocate descriptor rings.\n");
+		dev_err(&pdev->dev, "Failed to allocate descriptor rings.\n");
 		err = -ENOMEM;
 		goto err_out_unmap_bars;
 	}
@@ -2626,7 +2533,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev->pdev = pdev;
 	dev->irq = pdev->irq;
 
-	for (i=0; i<HIFN_D_RES_RSIZE; ++i)
+	for (i = 0; i < HIFN_D_RES_RSIZE; ++i)
 		dev->sa[i] = NULL;
 
 	pci_set_drvdata(pdev, dev);
@@ -2637,7 +2544,8 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
 	if (err) {
-		dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
+		dev_err(&pdev->dev, "Failed to request IRQ%d: err: %d.\n",
+			dev->irq, err);
 		dev->irq = 0;
 		goto err_out_free_desc;
 	}
@@ -2646,10 +2554,6 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_out_free_irq;
 
-	err = hifn_test(dev, 1, 0);
-	if (err)
-		goto err_out_stop_device;
-
 	err = hifn_register_rng(dev);
 	if (err)
 		goto err_out_stop_device;
@@ -2661,9 +2565,9 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_DELAYED_WORK(&dev->work, hifn_work);
 	schedule_delayed_work(&dev->work, HZ);
 
-	dprintk("HIFN crypto accelerator card at %s has been "
-			"successfully registered as %s.\n",
-			pci_name(pdev), dev->name);
+	dev_dbg(&pdev->dev, "HIFN crypto accelerator card at %s has been "
+		"successfully registered as %s.\n",
+		pci_name(pdev), dev->name);
 
 	return 0;
 
@@ -2680,7 +2584,7 @@ err_out_free_desc:
 			dev->desc_virt, dev->desc_dma);
 
 err_out_unmap_bars:
-	for (i=0; i<3; ++i)
+	for (i = 0; i < 3; ++i)
 		if (dev->bar[i])
 			iounmap(dev->bar[i]);
 
@@ -2715,7 +2619,7 @@ static void hifn_remove(struct pci_dev *pdev)
 
 		pci_free_consistent(pdev, sizeof(struct hifn_dma),
 				dev->desc_virt, dev->desc_dma);
-		for (i=0; i<3; ++i)
+		for (i = 0; i < 3; ++i)
 			if (dev->bar[i])
 				iounmap(dev->bar[i]);
 
@@ -2750,8 +2654,7 @@ static int __init hifn_init(void)
 
 	if (strncmp(hifn_pll_ref, "ext", 3) &&
 	    strncmp(hifn_pll_ref, "pci", 3)) {
-		printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
-				"must be pci or ext");
+		pr_err("hifn795x: invalid hifn_pll_ref clock, must be pci or ext");
 		return -EINVAL;
 	}
 
@@ -2763,22 +2666,21 @@ static int __init hifn_init(void)
 	if (hifn_pll_ref[3] != '\0') {
 		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
 		if (freq < 20 || freq > 100) {
-			printk(KERN_ERR "hifn795x: invalid hifn_pll_ref "
-					"frequency, must be in the range "
-					"of 20-100");
+			pr_err("hifn795x: invalid hifn_pll_ref frequency, must"
+			       "be in the range of 20-100");
 			return -EINVAL;
 		}
 	}
 
 	err = pci_register_driver(&hifn_pci_driver);
 	if (err < 0) {
-		dprintk("Failed to register PCI driver for %s device.\n",
-				hifn_pci_driver.name);
+		pr_err("Failed to register PCI driver for %s device.\n",
+		       hifn_pci_driver.name);
 		return -ENODEV;
 	}
 
-	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
-			"has been successfully registered.\n");
+	pr_info("Driver for HIFN 795x crypto accelerator chip "
+		"has been successfully registered.\n");
 
 	return 0;
 }
@@ -2787,8 +2689,8 @@ static void __exit hifn_fini(void)
 {
 	pci_unregister_driver(&hifn_pci_driver);
 
-	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
-			"has been successfully unregistered.\n");
+	pr_info("Driver for HIFN 795x crypto accelerator chip "
+		"has been successfully unregistered.\n");
 }
 
 module_init(hifn_init);

+ 2 - 4
drivers/crypto/ixp4xx_crypto.c

@@ -510,10 +510,8 @@ npe_error:
 	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
 	ret = -EIO;
 err:
-	if (ctx_pool)
-		dma_pool_destroy(ctx_pool);
-	if (buffer_pool)
-		dma_pool_destroy(buffer_pool);
+	dma_pool_destroy(ctx_pool);
+	dma_pool_destroy(buffer_pool);
 	npe_release(npe_c);
 	return ret;
 }

+ 8 - 0
drivers/crypto/marvell/cipher.c

@@ -401,7 +401,15 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
 		return -EINVAL;
 
 	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (creq->src_nents < 0) {
+		dev_err(cesa_dev->dev, "Invalid number of src SG");
+		return creq->src_nents;
+	}
 	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+	if (creq->dst_nents < 0) {
+		dev_err(cesa_dev->dev, "Invalid number of dst SG");
+		return creq->dst_nents;
+	}
 
 	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
 			      CESA_SA_DESC_CFG_OP_MSK);

+ 4 - 0
drivers/crypto/marvell/hash.c

@@ -712,6 +712,10 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
 		creq->req.base.type = CESA_STD_REQ;
 
 	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (creq->src_nents < 0) {
+		dev_err(cesa_dev->dev, "Invalid number of src SG");
+		return creq->src_nents;
+	}
 
 	ret = mv_cesa_ahash_cache_req(req, cached);
 	if (ret)

+ 13 - 37
drivers/crypto/n2_core.c

@@ -241,7 +241,7 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
 
 struct n2_ahash_alg {
 	struct list_head	entry;
-	const char		*hash_zero;
+	const u8		*hash_zero;
 	const u32		*hash_init;
 	u8			hw_op_hashsz;
 	u8			digest_size;
@@ -1267,7 +1267,7 @@ static LIST_HEAD(cipher_algs);
 
 struct n2_hash_tmpl {
 	const char	*name;
-	const char	*hash_zero;
+	const u8	*hash_zero;
 	const u32	*hash_init;
 	u8		hw_op_hashsz;
 	u8		digest_size;
@@ -1276,40 +1276,19 @@ struct n2_hash_tmpl {
 	u8		hmac_type;
 };
 
-static const char md5_zero[MD5_DIGEST_SIZE] = {
-	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
-	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
-};
 static const u32 md5_init[MD5_HASH_WORDS] = {
 	cpu_to_le32(MD5_H0),
 	cpu_to_le32(MD5_H1),
 	cpu_to_le32(MD5_H2),
 	cpu_to_le32(MD5_H3),
 };
-static const char sha1_zero[SHA1_DIGEST_SIZE] = {
-	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
-	0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
-	0x07, 0x09
-};
 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
 	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
 };
-static const char sha256_zero[SHA256_DIGEST_SIZE] = {
-	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
-	0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
-	0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
-	0x1b, 0x78, 0x52, 0xb8, 0x55
-};
 static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
 	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
 	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
 };
-static const char sha224_zero[SHA224_DIGEST_SIZE] = {
-	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
-	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
-	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
-	0x2f
-};
 static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
 	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
 	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
@@ -1317,7 +1296,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
 
 static const struct n2_hash_tmpl hash_tmpls[] = {
 	{ .name		= "md5",
-	  .hash_zero	= md5_zero,
+	  .hash_zero	= md5_zero_message_hash,
 	  .hash_init	= md5_init,
 	  .auth_type	= AUTH_TYPE_MD5,
 	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
@@ -1325,7 +1304,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
 	  .digest_size	= MD5_DIGEST_SIZE,
 	  .block_size	= MD5_HMAC_BLOCK_SIZE },
 	{ .name		= "sha1",
-	  .hash_zero	= sha1_zero,
+	  .hash_zero	= sha1_zero_message_hash,
 	  .hash_init	= sha1_init,
 	  .auth_type	= AUTH_TYPE_SHA1,
 	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
@@ -1333,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
 	  .digest_size	= SHA1_DIGEST_SIZE,
 	  .block_size	= SHA1_BLOCK_SIZE },
 	{ .name		= "sha256",
-	  .hash_zero	= sha256_zero,
+	  .hash_zero	= sha256_zero_message_hash,
 	  .hash_init	= sha256_init,
 	  .auth_type	= AUTH_TYPE_SHA256,
 	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
@@ -1341,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
 	  .digest_size	= SHA256_DIGEST_SIZE,
 	  .block_size	= SHA256_BLOCK_SIZE },
 	{ .name		= "sha224",
-	  .hash_zero	= sha224_zero,
+	  .hash_zero	= sha224_zero_message_hash,
 	  .hash_init	= sha224_init,
 	  .auth_type	= AUTH_TYPE_SHA256,
 	  .hmac_type	= AUTH_TYPE_RESERVED,
@@ -2243,22 +2222,19 @@ static struct platform_driver n2_mau_driver = {
 	.remove		=	n2_mau_remove,
 };
 
+static struct platform_driver * const drivers[] = {
+	&n2_crypto_driver,
+	&n2_mau_driver,
+};
+
 static int __init n2_init(void)
 {
-	int err = platform_driver_register(&n2_crypto_driver);
-
-	if (!err) {
-		err = platform_driver_register(&n2_mau_driver);
-		if (err)
-			platform_driver_unregister(&n2_crypto_driver);
-	}
-	return err;
+	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
 }
 
 static void __exit n2_exit(void)
 {
-	platform_driver_unregister(&n2_mau_driver);
-	platform_driver_unregister(&n2_crypto_driver);
+	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
 }
 
 module_init(n2_init);

+ 12 - 11
drivers/crypto/nx/nx-842-powernv.c

@@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
 			     (unsigned int)ccw,
 			     (unsigned int)be32_to_cpu(crb->ccw));
 
+	/*
+	 * NX842 coprocessor sets 3rd bit in CR register with XER[S0].
+	 * XER[S0] is the integer summary overflow bit which is nothing
+	 * to do NX. Since this bit can be set with other return values,
+	 * mask this bit.
+	 */
+	ret &= ~ICSWX_XERS0;
+
 	switch (ret) {
 	case ICSWX_INITIATED:
 		ret = wait_for_csb(wmem, csb);
@@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
 		pr_err_ratelimited("ICSWX rejected\n");
 		ret = -EPROTO;
 		break;
-	default:
-		pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
-		ret = -EPROTO;
-		break;
 	}
 
 	if (!ret)
@@ -525,7 +529,6 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
 static int __init nx842_powernv_probe(struct device_node *dn)
 {
 	struct nx842_coproc *coproc;
-	struct property *ct_prop, *ci_prop;
 	unsigned int ct, ci;
 	int chip_id;
 
@@ -534,18 +537,16 @@ static int __init nx842_powernv_probe(struct device_node *dn)
 		pr_err("ibm,chip-id missing\n");
 		return -EINVAL;
 	}
-	ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL);
-	if (!ct_prop) {
+
+	if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
 		pr_err("ibm,842-coprocessor-type missing\n");
 		return -EINVAL;
 	}
-	ct = be32_to_cpu(*(unsigned int *)ct_prop->value);
-	ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL);
-	if (!ci_prop) {
+
+	if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) {
 		pr_err("ibm,842-coprocessor-instance missing\n");
 		return -EINVAL;
 	}
-	ci = be32_to_cpu(*(unsigned int *)ci_prop->value);
 
 	coproc = kmalloc(sizeof(*coproc), GFP_KERNEL);
 	if (!coproc)

+ 1 - 3
drivers/crypto/omap-aes.c

@@ -539,8 +539,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 {
-	int err = 0;
-
 	pr_debug("total: %d\n", dd->total);
 
 	omap_aes_dma_stop(dd);
@@ -548,7 +546,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 	dmaengine_terminate_all(dd->dma_lch_in);
 	dmaengine_terminate_all(dd->dma_lch_out);
 
-	return err;
+	return 0;
 }
 
 static int omap_aes_check_aligned(struct scatterlist *sg, int total)

+ 2 - 3
drivers/crypto/omap-des.c

@@ -527,8 +527,6 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
 
 static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
 {
-	int err = 0;
-
 	pr_debug("total: %d\n", dd->total);
 
 	omap_des_dma_stop(dd);
@@ -536,7 +534,7 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
 	dmaengine_terminate_all(dd->dma_lch_in);
 	dmaengine_terminate_all(dd->dma_lch_out);
 
-	return err;
+	return 0;
 }
 
 static int omap_des_copy_needed(struct scatterlist *sg)
@@ -1086,6 +1084,7 @@ static int omap_des_probe(struct platform_device *pdev)
 	dd->phys_base = res->start;
 
 	pm_runtime_enable(dev);
+	pm_runtime_irq_safe(dev);
 	err = pm_runtime_get_sync(dev);
 	if (err < 0) {
 		pm_runtime_put_noidle(dev);

+ 2 - 2
drivers/crypto/padlock-aes.c

@@ -238,7 +238,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
 	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
 	 * We could avoid some copying here but it's probably not worth it.
 	 */
-	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
+	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
 		ecb_crypt_copy(in, out, key, cword, count);
 		return;
 	}
@@ -250,7 +250,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
 			    u8 *iv, struct cword *cword, int count)
 {
 	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
-	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
+	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
 		return cbc_crypt_copy(in, out, key, iv, cword, count);
 
 	return rep_xcrypt_cbc(in, out, key, iv, cword, count);

+ 38 - 18
drivers/crypto/picoxcell_crypto.c

@@ -272,12 +272,6 @@ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
 	return indx;
 }
 
-/* Count the number of scatterlist entries in a scatterlist. */
-static inline int sg_count(struct scatterlist *sg_list, int nbytes)
-{
-	return sg_nents_for_len(sg_list, nbytes);
-}
-
 static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
 {
 	ddt->p = phys;
@@ -295,12 +289,17 @@ static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
 					 enum dma_data_direction dir,
 					 dma_addr_t *ddt_phys)
 {
-	unsigned nents, mapped_ents;
+	unsigned mapped_ents;
 	struct scatterlist *cur;
 	struct spacc_ddt *ddt;
 	int i;
+	int nents;
 
-	nents = sg_count(payload, nbytes);
+	nents = sg_nents_for_len(payload, nbytes);
+	if (nents < 0) {
+		dev_err(engine->dev, "Invalid numbers of SG.\n");
+		return NULL;
+	}
 	mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
 
 	if (mapped_ents + 1 > MAX_DDT_LEN)
@@ -328,7 +327,7 @@ static int spacc_aead_make_ddts(struct aead_request *areq)
 	struct spacc_engine *engine = req->engine;
 	struct spacc_ddt *src_ddt, *dst_ddt;
 	unsigned total;
-	unsigned int src_nents, dst_nents;
+	int src_nents, dst_nents;
 	struct scatterlist *cur;
 	int i, dst_ents, src_ents;
 
@@ -336,13 +335,21 @@ static int spacc_aead_make_ddts(struct aead_request *areq)
 	if (req->is_encrypt)
 		total += crypto_aead_authsize(aead);
 
-	src_nents = sg_count(areq->src, total);
+	src_nents = sg_nents_for_len(areq->src, total);
+	if (src_nents < 0) {
+		dev_err(engine->dev, "Invalid numbers of src SG.\n");
+		return src_nents;
+	}
 	if (src_nents + 1 > MAX_DDT_LEN)
 		return -E2BIG;
 
 	dst_nents = 0;
 	if (areq->src != areq->dst) {
-		dst_nents = sg_count(areq->dst, total);
+		dst_nents = sg_nents_for_len(areq->dst, total);
+		if (dst_nents < 0) {
+			dev_err(engine->dev, "Invalid numbers of dst SG.\n");
+			return dst_nents;
+		}
 		if (src_nents + 1 > MAX_DDT_LEN)
 			return -E2BIG;
 	}
@@ -422,13 +429,22 @@ static void spacc_aead_free_ddts(struct spacc_req *req)
 			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
 	struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
 	struct spacc_engine *engine = aead_ctx->generic.engine;
-	unsigned nents = sg_count(areq->src, total);
+	int nents = sg_nents_for_len(areq->src, total);
+
+	/* sg_nents_for_len should not fail since it works when mapping sg */
+	if (unlikely(nents < 0)) {
+		dev_err(engine->dev, "Invalid numbers of src SG.\n");
+		return;
+	}
 
 	if (areq->src != areq->dst) {
 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
-		dma_unmap_sg(engine->dev, areq->dst,
-			     sg_count(areq->dst, total),
-			     DMA_FROM_DEVICE);
+		nents = sg_nents_for_len(areq->dst, total);
+		if (unlikely(nents < 0)) {
+			dev_err(engine->dev, "Invalid numbers of dst SG.\n");
+			return;
+		}
+		dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
 	} else
 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
 
@@ -440,7 +456,12 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
 			   dma_addr_t ddt_addr, struct scatterlist *payload,
 			   unsigned nbytes, enum dma_data_direction dir)
 {
-	unsigned nents = sg_count(payload, nbytes);
+	int nents = sg_nents_for_len(payload, nbytes);
+
+	if (nents < 0) {
+		dev_err(req->engine->dev, "Invalid numbers of SG.\n");
+		return;
+	}
 
 	dma_unmap_sg(req->engine->dev, payload, nents, dir);
 	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
@@ -835,8 +856,7 @@ static int spacc_ablk_need_fallback(struct spacc_req *req)
 
 static void spacc_ablk_complete(struct spacc_req *req)
 {
-	struct ablkcipher_request *ablk_req =
-		container_of(req->req, struct ablkcipher_request, base);
+	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
 
 	if (ablk_req->src != ablk_req->dst) {
 		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,

+ 46 - 0
drivers/crypto/qat/Kconfig

@@ -22,6 +22,28 @@ config CRYPTO_DEV_QAT_DH895xCC
 	  To compile this as a module, choose M here: the module
 	  will be called qat_dh895xcc.
 
+config CRYPTO_DEV_QAT_C3XXX
+	tristate "Support for Intel(R) C3XXX"
+	depends on X86 && PCI
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c3xxx.
+
+config CRYPTO_DEV_QAT_C62X
+	tristate "Support for Intel(R) C62X"
+	depends on X86 && PCI
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c62x.
+
 config CRYPTO_DEV_QAT_DH895xCCVF
 	tristate "Support for Intel(R) DH895xCC Virtual Function"
 	depends on X86 && PCI
@@ -34,3 +56,27 @@ config CRYPTO_DEV_QAT_DH895xCCVF
 
 	  To compile this as a module, choose M here: the module
 	  will be called qat_dh895xccvf.
+
+config CRYPTO_DEV_QAT_C3XXXVF
+	tristate "Support for Intel(R) C3XXX Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c3xxxvf.
+
+config CRYPTO_DEV_QAT_C62XVF
+	tristate "Support for Intel(R) C62X Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c62xvf.

+ 4 - 0
drivers/crypto/qat/Makefile

@@ -1,3 +1,7 @@
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/

+ 3 - 0
drivers/crypto/qat/qat_c3xxx/Makefile

@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
+qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o

+ 238 - 0
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c

@@ -0,0 +1,238 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_c3xxx_hw_data.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const u32 thrd_to_arb_map_6_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c3xxx_class = {
+	.name = ADF_C3XXX_DEVICE_NAME,
+	.type = DEV_C3XXX,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return (~fuse) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET &
+		ADF_C3XXX_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return (~fuse) & ADF_C3XXX_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return 0;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int aes = get_num_aes(self);
+
+	if (aes == 6)
+		return DEV_SKU_4;
+
+	return DEV_SKU_UNKNOWN;
+}
+
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_6_me_sku;
+		break;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"The configuration doesn't match any SKU");
+		*arb_map_config = NULL;
+	}
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C3XXX_PF2VF_OFFSET(i);
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C3XXX_VINTMSK_OFFSET(i);
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
+		val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
+		val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
+		val |= ADF_C3XXX_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
+		val |= ADF_C3XXX_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
+	}
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET,
+		   ADF_C3XXX_SMIA0_MASK);
+	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET,
+		   ADF_C3XXX_SMIA1_MASK);
+}
+
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c3xxx_class;
+	hw_data->instance_id = c3xxx_class.instances++;
+	hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_C3XXX_FW;
+	hw_data->fw_mmp_name = ADF_C3XXX_MMP;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}

+ 83 - 0
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h

@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C3XXX_HW_DATA_H_
+#define ADF_C3XXX_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C3XXX_PMISC_BAR 0
+#define ADF_C3XXX_ETR_BAR 1
+#define ADF_C3XXX_RX_RINGS_OFFSET 8
+#define ADF_C3XXX_TX_RINGS_MASK 0xFF
+#define ADF_C3XXX_MAX_ACCELERATORS 3
+#define ADF_C3XXX_MAX_ACCELENGINES 6
+#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
+#define ADF_C3XXX_ACCELERATORS_MASK 0x3
+#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
+#define ADF_C3XXX_ETR_MAX_BANKS 16
+#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_C3XXX_SMIA0_MASK 0xFFFF
+#define ADF_C3XXX_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_C3XXX_ERRSSMSH_EN BIT(3)
+
+#define ADF_C3XXX_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_C3XXX_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
+
+/* Firmware Binary */
+#define ADF_C3XXX_FW "qat_c3xxx.bin"
+#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+#endif

+ 335 - 0
drivers/crypto/qat/qat_c3xxx/adf_drv.c

@@ -0,0 +1,335 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxx_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C3XXX_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C3XXX_PCI_DEVICE_ID:
+			adf_clean_hw_data_c3xxx(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	int ret, bar_mask;
+
+	switch (ent->device) {
+	case ADF_C3XXX_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table.
+	 * This should be called before adf_cleanup_accel is called */
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c3xxx(accel_dev->hw_device);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	/* If the device has no acceleration engines then ignore it. */
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+			 ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err_free_reg;
+	}
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err_free_reg;
+	}
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	if (adf_dev_stop(accel_dev))
+		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+	adf_dev_shutdown(accel_dev);
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);

+ 3 - 0
drivers/crypto/qat/qat_c3xxxvf/Makefile

@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
+qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o

+ 173 - 0
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c

@@ -0,0 +1,173 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static struct adf_hw_device_class c3xxxiov_class = {
+	.name = ADF_C3XXXVF_DEVICE_NAME,
+	.type = DEV_C3XXXVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_C3XXXIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_C3XXXIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C3XXXIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C3XXXIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Init event to PF\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0))
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Shutdown event to PF\n");
+}
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c3xxxiov_class;
+	hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
+}
+
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
+}
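
adf_init_hw_data_c3xxxiov() fills in a table of constants and function pointers that the device-independent qat_common code later calls through; operations a VF does not need (admin comms, arbiter setup, error correction) are wired to the no-op helpers defined above. The following is a small sketch of that ops-table idea, using a hypothetical struct rather than the real adf_hw_device_data layout.

#include <stdio.h>

struct hw_ops {
	int  (*init_admin_comms)(void *dev);	/* PF needs this; a VF plugs in a no-op */
	void (*enable_error_correction)(void *dev);
	unsigned int num_banks;
};

static int  vf_int_noop(void *dev)  { (void)dev; return 0; }
static void vf_void_noop(void *dev) { (void)dev; }

static int generic_dev_init(void *dev, const struct hw_ops *ops)
{
	/* common code never checks which device it is; it just calls through the table */
	if (ops->init_admin_comms(dev))
		return -1;
	ops->enable_error_correction(dev);
	printf("initialised with %u bank(s)\n", ops->num_banks);
	return 0;
}

int main(void)
{
	const struct hw_ops c3xxxvf_like = {
		.init_admin_comms = vf_int_noop,
		.enable_error_correction = vf_void_noop,
		.num_banks = 1,
	};

	return generic_dev_init(NULL, &c3xxxvf_like);
}

This is why each new device family in this series only needs a *_hw_data.c file plus a thin adf_drv.c: the shared init, start, and teardown logic stays in qat_common.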

+ 19 - 12
drivers/crypto/qat/qat_dh895xccvf/adf_drv.h → drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h

@@ -3,7 +3,7 @@
   redistributing this file, you may do so under either license.
 
   GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
+  Copyright(c) 2015 Intel Corporation.
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
   published by the Free Software Foundation.
@@ -17,7 +17,7 @@
   qat-linux@intel.com
 
   BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
+  Copyright(c) 2015 Intel Corporation.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:
@@ -44,14 +44,21 @@
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-#ifndef ADF_DH895xVF_DRV_H_
-#define ADF_DH895xVF_DRV_H_
-#include <adf_accel_devices.h>
-#include <adf_transport.h>
-
-void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
-int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
-void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+#ifndef ADF_C3XXXVF_HW_DATA_H_
+#define ADF_C3XXXVF_HW_DATA_H_
+
+#define ADF_C3XXXIOV_PMISC_BAR 1
+#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
+#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
+#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
+#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
+#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
+#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
+#define ADF_C3XXXIOV_ETR_BAR 0
+#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
+#define ADF_C3XXXIOV_PF2VF_OFFSET	0x200
+#define ADF_C3XXXIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
 #endif

+ 305 - 0
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c

@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C3XXXVF_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C3XXXIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	int ret, bar_mask;
+
+	switch (ent->device) {
+	case ADF_C3XXXIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+			 ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	if (adf_dev_stop(accel_dev))
+		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);

+ 3 - 0
drivers/crypto/qat/qat_c62x/Makefile

@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
+qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o

+ 248 - 0
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c

@@ -0,0 +1,248 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_c62x_hw_data.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const u32 thrd_to_arb_map_8_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0
+};
+
+static const u32 thrd_to_arb_map_10_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c62x_class = {
+	.name = ADF_C62X_DEVICE_NAME,
+	.type = DEV_C62X,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return (~fuse) >> ADF_C62X_ACCELERATORS_REG_OFFSET &
+			  ADF_C62X_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return (~fuse) & ADF_C62X_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int aes = get_num_aes(self);
+
+	if (aes == 8)
+		return DEV_SKU_2;
+	else if (aes == 10)
+		return DEV_SKU_4;
+
+	return DEV_SKU_UNKNOWN;
+}
+
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_2:
+		*arb_map_config = thrd_to_arb_map_8_me_sku;
+		break;
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_10_me_sku;
+		break;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"The configuration doesn't match any SKU");
+		*arb_map_config = NULL;
+	}
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C62X_PF2VF_OFFSET(i);
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C62X_VINTMSK_OFFSET(i);
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
+		val |= ADF_C62X_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
+		val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
+		val |= ADF_C62X_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
+		val |= ADF_C62X_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
+	}
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET,
+		   ADF_C62X_SMIA0_MASK);
+	ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET,
+		   ADF_C62X_SMIA1_MASK);
+}
+
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c62x_class;
+	hw_data->instance_id = c62x_class.instances++;
+	hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_C62X_FW;
+	hw_data->fw_mmp_name = ADF_C62X_MMP;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
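
On the C62x PF, get_sku() counts the acceleration engines left enabled by the fuses (8 engines maps to DEV_SKU_2, 10 to DEV_SKU_4) and adf_get_arbiter_mapping() then hands back the matching thread-to-arbiter table. Below is a compact sketch of that selection; the table contents are truncated and purely illustrative.

#include <stdio.h>

static const unsigned int map_8_me[10]  = { 0x12222AAA, 0x11222AAA /* ... truncated */ };
static const unsigned int map_10_me[10] = { 0x12222AAA, 0x11222AAA /* ... truncated */ };

static unsigned int count_aes(unsigned int ae_mask, unsigned int max_aes)
{
	unsigned int i, ctr = 0;

	for (i = 0; i < max_aes; i++)		/* same popcount loop as get_num_aes() */
		if (ae_mask & (1u << i))
			ctr++;
	return ctr;
}

int main(void)
{
	unsigned int aes = count_aes(0x3FF, 10);	/* all ten engines fused in */
	const unsigned int *arb = NULL;

	if (aes == 8)
		arb = map_8_me;			/* DEV_SKU_2 in the driver above */
	else if (aes == 10)
		arb = map_10_me;		/* DEV_SKU_4 */

	printf("%u engines -> %s\n", aes, arb ? "arbiter map selected" : "unknown SKU");
	return arb ? 0 : 1;
}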

+ 84 - 0
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h

@@ -0,0 +1,84 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C62X_HW_DATA_H_
+#define ADF_C62X_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C62X_SRAM_BAR 0
+#define ADF_C62X_PMISC_BAR 1
+#define ADF_C62X_ETR_BAR 2
+#define ADF_C62X_RX_RINGS_OFFSET 8
+#define ADF_C62X_TX_RINGS_MASK 0xFF
+#define ADF_C62X_MAX_ACCELERATORS 5
+#define ADF_C62X_MAX_ACCELENGINES 10
+#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
+#define ADF_C62X_ACCELERATORS_MASK 0x1F
+#define ADF_C62X_ACCELENGINES_MASK 0x3FF
+#define ADF_C62X_ETR_MAX_BANKS 16
+#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_C62X_SMIA0_MASK 0xFFFF
+#define ADF_C62X_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_C62X_ERRSSMSH_EN BIT(3)
+
+#define ADF_C62X_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_C62X_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
+
+/* Firmware Binary */
+#define ADF_C62X_FW "qat_c62x.bin"
+#define ADF_C62X_MMP "qat_c62x_mmp.bin"
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
+#endif
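
The per-index macros above compute CSR addresses by scaling the engine, accelerator, or VF index and adding a fixed base: a 0x1000 stride for the AE control registers, 0x4000 for the shared-memory error registers, and 4 bytes per VF for the PF2VF and VINTMSK windows. A worked example of that arithmetic, with the ADF_C62X_ prefixes shortened for brevity:

#include <stdio.h>

#define AE_CTX_ENABLES(i)	((i) * 0x1000 + 0x20818)		/* ADF_C62X_AE_CTX_ENABLES */
#define UERRSSMSH(i)		((i) * 0x4000 + 0x18)			/* ADF_C62X_UERRSSMSH */
#define PF2VF_OFFSET(i)		(0x3A000 + 0x280 + ((i) * 0x04))	/* ADF_C62X_PF2VF_OFFSET */

int main(void)
{
	/* accel engine 3: 3 * 0x1000 + 0x20818 = 0x23818 */
	printf("AE_CTX_ENABLES(3) = 0x%x\n", AE_CTX_ENABLES(3));
	/* accelerator 2:   2 * 0x4000 + 0x18    = 0x8018  */
	printf("UERRSSMSH(2)      = 0x%x\n", UERRSSMSH(2));
	/* VF 5: 0x3A000 + 0x280 + 5 * 4 = 0x3A294 */
	printf("PF2VF_OFFSET(5)   = 0x%x\n", PF2VF_OFFSET(5));
	return 0;
}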

+ 335 - 0
drivers/crypto/qat/qat_c62x/adf_drv.c

@@ -0,0 +1,335 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62x_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C62X_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C62X_PCI_DEVICE_ID:
+			adf_clean_hw_data_c62x(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	int ret, bar_mask;
+
+	switch (ent->device) {
+	case ADF_C62X_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table.
+	 * This should be called before adf_cleanup_accel is called */
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c62x(accel_dev->hw_device);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	/* If the device has no acceleration engines then ignore it. */
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+			 ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err_free_reg;
+	}
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err_free_reg;
+	}
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	if (adf_dev_stop(accel_dev))
+		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+	adf_dev_shutdown(accel_dev);
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
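
adf_probe() above maps only the memory BARs reported by pci_select_bars(): it walks the set bits of the returned mask and hands out consecutive pci_bars[] slots. The sketch below reproduces just that bit-walking logic in plain C; the mask value is made up for the example. The same probe also asks for a 64-bit DMA mask first and only falls back to 32-bit when the platform rejects it, keeping the coherent mask in step.

#include <stdio.h>

int main(void)
{
	unsigned long bar_mask = 0x15;	/* pretend BARs 0, 2 and 4 are memory BARs */
	unsigned int bar_nr, slot = 0;

	for (bar_nr = 0; bar_nr < 6; bar_nr++) {
		if (!(bar_mask & (1UL << bar_nr)))
			continue;
		/* the driver fills accel_pci_dev->pci_bars[slot] at this point */
		printf("mapping BAR %u into slot %u\n", bar_nr, slot++);
	}
	return 0;
}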

+ 3 - 0
drivers/crypto/qat/qat_c62xvf/Makefile

@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
+qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o

+ 173 - 0
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c

@@ -0,0 +1,173 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_c62xvf_hw_data.h"
+
+static struct adf_hw_device_class c62xiov_class = {
+	.name = ADF_C62XVF_DEVICE_NAME,
+	.type = DEV_C62XVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_C62XIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_C62XIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C62XIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C62XIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Init event to PF\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0))
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Shutdown event to PF\n");
+}
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c62xiov_class;
+	hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
+}
+
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
+}
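
adf_vf2pf_init() and adf_vf2pf_shutdown() build a single 32-bit message word: the system-origin marker OR'd with a message type shifted into its field, which adf_iov_putmsg() then delivers to the PF. The sketch below shows only the shape of that composition; the field values here are made up, since the real constants live in adf_pf2vf_msg.h and are not reproduced in this diff.

#include <stdio.h>
#include <stdint.h>

#define MSGORIGIN_SYSTEM	0x1u	/* hypothetical value */
#define MSGTYPE_SHIFT		2	/* hypothetical value */
#define MSGTYPE_INIT		0x3u	/* hypothetical value */
#define MSGTYPE_SHUTDOWN	0x4u	/* hypothetical value */

static uint32_t build_msg(uint32_t type)
{
	/* same shape as adf_vf2pf_init(): origin marker OR'd with the shifted type */
	return MSGORIGIN_SYSTEM | (type << MSGTYPE_SHIFT);
}

int main(void)
{
	printf("init     -> 0x%x\n", (unsigned)build_msg(MSGTYPE_INIT));
	printf("shutdown -> 0x%x\n", (unsigned)build_msg(MSGTYPE_SHUTDOWN));
	return 0;
}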

+ 19 - 13
drivers/crypto/qat/qat_dh895xcc/adf_drv.h → drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h

@@ -3,7 +3,7 @@
   redistributing this file, you may do so under either license.
 
   GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
+  Copyright(c) 2015 Intel Corporation.
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
   published by the Free Software Foundation.
@@ -17,7 +17,7 @@
   qat-linux@intel.com
 
   BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
+  Copyright(c) 2015 Intel Corporation.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:
@@ -44,15 +44,21 @@
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-#ifndef ADF_DH895x_DRV_H_
-#define ADF_DH895x_DRV_H_
-#include <adf_accel_devices.h>
-#include <adf_transport.h>
-
-void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
-int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
-void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
-void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
-			     uint32_t const **arb_map_config);
+#ifndef ADF_C62XVF_HW_DATA_H_
+#define ADF_C62XVF_HW_DATA_H_
+
+#define ADF_C62XIOV_PMISC_BAR 1
+#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
+#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
+#define ADF_C62XIOV_MAX_ACCELERATORS 1
+#define ADF_C62XIOV_MAX_ACCELENGINES 1
+#define ADF_C62XIOV_RX_RINGS_OFFSET 8
+#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
+#define ADF_C62XIOV_ETR_BAR 0
+#define ADF_C62XIOV_ETR_MAX_BANKS 1
+#define ADF_C62XIOV_PF2VF_OFFSET	0x200
+#define ADF_C62XIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
 #endif

+ 305 - 0
drivers/crypto/qat/qat_c62xvf/adf_drv.c

@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62xvf_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C62XVF_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C62XIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_c62xiov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	int ret, bar_mask;
+
+	switch (ent->device) {
+	case ADF_C62XIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c62xiov(accel_dev->hw_device);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+			 ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	if (adf_dev_stop(accel_dev))
+		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
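
Before configuring the device, the VF probe above initialises vf.iov_msg_completion so the thread that sends a VF2PF request can sleep until the interrupt path signals that the PF's response has arrived. Below is a userspace sketch of that request/response rendezvous, using a condition variable in place of the kernel completion; the thread and helper names are illustrative only.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int done;

static void *pf_response(void *arg)		/* stands in for the VF ISR path */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	done = 1;				/* like complete(&vf.iov_msg_completion) */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, pf_response, NULL);

	pthread_mutex_lock(&lock);		/* like waiting after adf_iov_putmsg() */
	while (!done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("PF response received");
	return 0;
}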

+ 3 - 1
drivers/crypto/qat/qat_common/Makefile

@@ -4,10 +4,12 @@ $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
 			      $(obj)/qat_rsaprivkey-asn1.h
 
 clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
-clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h
+clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
 
 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
 intel_qat-objs := adf_cfg.o \
+	adf_isr.o \
+	adf_vf_isr.o \
 	adf_ctl_drv.o \
 	adf_dev_mgr.o \
 	adf_init.o \

+ 14 - 2
drivers/crypto/qat/qat_common/adf_accel_devices.h

@@ -55,8 +55,20 @@
 
 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
 #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
+#define ADF_C62X_DEVICE_NAME "c62x"
+#define ADF_C62XVF_DEVICE_NAME "c62xvf"
+#define ADF_C3XXX_DEVICE_NAME "c3xxx"
+#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
 #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
+#define ADF_C62X_PCI_DEVICE_ID 0x37c8
+#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
+#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
+#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
+#define ADF_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_DEVICE_FUSECTL_OFFSET 0x40
+#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
@@ -168,11 +180,11 @@ struct adf_hw_device_data {
 	const char *fw_mmp_name;
 	uint32_t fuses;
 	uint32_t accel_capabilities_mask;
+	uint32_t instance_id;
 	uint16_t accel_mask;
 	uint16_t ae_mask;
 	uint16_t tx_rings_mask;
 	uint8_t tx_rx_gap;
-	uint8_t instance_id;
 	uint8_t num_banks;
 	uint8_t num_accel;
 	uint8_t num_logical_accel;
@@ -239,6 +251,6 @@ struct adf_accel_dev {
 		} vf;
 	};
 	bool is_vf;
-	uint8_t accel_id;
+	u32 accel_id;
 } __packed;
 #endif

+ 6 - 3
drivers/crypto/qat/qat_common/adf_accel_engine.c

@@ -78,9 +78,12 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
 	uof_addr = (void *)loader_data->uof_fw->data;
 	mmp_size = loader_data->mmp_fw->size;
 	mmp_addr = (void *)loader_data->mmp_fw->data;
-	qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
-	if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
-		dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
+	if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
+		goto out_err;
+	}
+	if (qat_uclo_map_obj(loader_data->fw_loader, uof_addr, uof_size)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
 		goto out_err;
 	}
 	if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {

+ 3 - 1
drivers/crypto/qat/qat_common/adf_admin.c

@@ -51,6 +51,7 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
+#include "adf_common_drv.h"
 #include "icp_qat_fw_init_admin.h"
 
 /* Admin Messages Registers */
@@ -234,7 +235,8 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
 	struct adf_bar *pmisc =
 		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
 	void __iomem *csr = pmisc->virt_addr;
-	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+	void __iomem *mailbox = (void __iomem *)((uintptr_t)csr +
+				 ADF_DH895XCC_MAILBOX_BASE_OFFSET);
 	u64 reg_val;
 
 	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,

+ 2 - 2
drivers/crypto/qat/qat_common/adf_aer.c

@@ -82,7 +82,7 @@ struct adf_reset_dev_data {
 	struct work_struct reset_work;
 };
 
-static void adf_dev_restore(struct adf_accel_dev *accel_dev)
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
 {
 	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
 	struct pci_dev *parent = pdev->bus->self;
@@ -197,7 +197,7 @@ static void adf_resume(struct pci_dev *pdev)
 	dev_info(&pdev->dev, "Device is up and runnig\n");
 }
 
-static struct pci_error_handlers adf_err_handler = {
+static const struct pci_error_handlers adf_err_handler = {
 	.error_detected = adf_error_detected,
 	.slot_reset = adf_slot_reset,
 	.resume = adf_resume,

+ 6 - 2
drivers/crypto/qat/qat_common/adf_cfg_common.h

@@ -72,12 +72,16 @@ enum adf_device_type {
 	DEV_UNKNOWN = 0,
 	DEV_DH895XCC,
 	DEV_DH895XCCVF,
+	DEV_C62X,
+	DEV_C62XVF,
+	DEV_C3XXX,
+	DEV_C3XXXVF
 };
 
 struct adf_dev_status_info {
 	enum adf_device_type type;
-	uint8_t accel_id;
-	uint8_t instance_id;
+	u32 accel_id;
+	u32 instance_id;
 	uint8_t num_ae;
 	uint8_t num_accel;
 	uint8_t num_logical_accel;

+ 24 - 7
drivers/crypto/qat/qat_common/adf_common_drv.h

@@ -54,7 +54,7 @@
 #include "icp_qat_hal.h"
 
 #define ADF_MAJOR_VERSION	0
-#define ADF_MINOR_VERSION	2
+#define ADF_MINOR_VERSION	6
 #define ADF_BUILD_VERSION	0
 #define ADF_DRV_VERSION		__stringify(ADF_MAJOR_VERSION) "." \
 				__stringify(ADF_MINOR_VERSION) "." \
@@ -106,8 +106,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
 int adf_dev_stop(struct adf_accel_dev *accel_dev);
 void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
 void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
 int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
@@ -143,6 +141,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
 
 int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
 void adf_disable_aer(struct adf_accel_dev *accel_dev);
+void adf_dev_restore(struct adf_accel_dev *accel_dev);
 int adf_init_aer(void);
 void adf_exit_aer(void);
 int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
@@ -159,6 +158,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev);
 void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
 int qat_crypto_register(void);
 int qat_crypto_unregister(void);
+int qat_crypto_dev_config(struct adf_accel_dev *accel_dev);
 struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
 void qat_crypto_put_instance(struct qat_crypto_instance *inst);
 void qat_alg_callback(void *resp);
@@ -168,6 +168,11 @@ void qat_algs_unregister(void);
 int qat_asym_algs_register(void);
 void qat_asym_algs_unregister(void);
 
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+
 int qat_hal_init(struct adf_accel_dev *accel_dev);
 void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
 void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
@@ -178,6 +183,8 @@ void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
 int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
 void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
 			  unsigned char ae, unsigned int ctx_mask);
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+			    unsigned int ae);
 int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
 			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
 			   unsigned char mode);
@@ -216,10 +223,10 @@ int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
 		  unsigned char ae, unsigned short lm_addr, unsigned int value);
 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
-int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
-			 void *addr_ptr, int mem_size);
-void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
-			void *addr_ptr, int mem_size);
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
+		       int mem_size);
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+		     void *addr_ptr, int mem_size);
 #if defined(CONFIG_PCI_IOV)
 int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
@@ -227,6 +234,8 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				  uint32_t vf_mask);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				 uint32_t vf_mask);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {
@@ -236,5 +245,13 @@ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 {
 }
+
+static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+}
 #endif
 #endif

+ 6 - 13
drivers/crypto/qat/qat_common/adf_ctl_drv.c

@@ -255,12 +255,9 @@ out:
 
 static int adf_ctl_is_device_in_use(int id)
 {
-	struct list_head *itr, *head = adf_devmgr_get_head();
-
-	list_for_each(itr, head) {
-		struct adf_accel_dev *dev =
-				list_entry(itr, struct adf_accel_dev, list);
+	struct adf_accel_dev *dev;
 
+	list_for_each_entry(dev, adf_devmgr_get_head(), list) {
 		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
 			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
 				dev_info(&GET_DEV(dev),
@@ -275,12 +272,10 @@ static int adf_ctl_is_device_in_use(int id)
 
 static int adf_ctl_stop_devices(uint32_t id)
 {
-	struct list_head *itr, *head = adf_devmgr_get_head();
+	struct adf_accel_dev *accel_dev;
 	int ret = 0;
 
-	list_for_each(itr, head) {
-		struct adf_accel_dev *accel_dev =
-				list_entry(itr, struct adf_accel_dev, list);
+	list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) {
 		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
 			if (!adf_dev_started(accel_dev))
 				continue;
@@ -342,12 +337,10 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
 	if (ret)
 		return ret;
 
+	ret = -ENODEV;
 	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
-	if (!accel_dev) {
-		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
-		ret = -ENODEV;
+	if (!accel_dev)
 		goto out;
-	}
 
 	if (!adf_dev_started(accel_dev)) {
 		dev_info(&GET_DEV(accel_dev),

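The adf_ctl_drv.c hunk above swaps open-coded list_for_each() plus list_entry() for the type-safe iterator. A self-contained sketch of the pattern (struct, list, and field names here are illustrative, not taken from the driver):

	struct item {
		struct list_head node;
		int id;
	};

	static LIST_HEAD(item_list);

	static void show_items(void)
	{
		struct item *pos;

		/* list_for_each_entry() hides the container_of() arithmetic. */
		list_for_each_entry(pos, &item_list, node)
			pr_info("item %d\n", pos->id);
	}
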
+ 31 - 5
drivers/crypto/qat/qat_common/adf_dev_mgr.c

@@ -53,6 +53,7 @@ static LIST_HEAD(accel_table);
 static LIST_HEAD(vfs_table);
 static DEFINE_MUTEX(table_lock);
 static uint32_t num_devices;
+static u8 id_map[ADF_MAX_DEVICES];
 
 struct vf_id_map {
 	u32 bdf;
@@ -116,8 +117,10 @@ void adf_clean_vf_map(bool vf)
 	mutex_lock(&table_lock);
 	list_for_each_safe(ptr, tmp, &vfs_table) {
 		map = list_entry(ptr, struct vf_id_map, list);
-		if (map->bdf != -1)
+		if (map->bdf != -1) {
+			id_map[map->id] = 0;
 			num_devices--;
+		}
 
 		if (vf && map->bdf == -1)
 			continue;
@@ -154,6 +157,19 @@ void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
 }
 EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
 
+static unsigned int adf_find_free_id(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ADF_MAX_DEVICES; i++) {
+		if (!id_map[i]) {
+			id_map[i] = 1;
+			return i;
+		}
+	}
+	return ADF_MAX_DEVICES + 1;
+}
+
 /**
  * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
  * @accel_dev:  Pointer to acceleration device.
@@ -194,8 +210,12 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
 		}
 
 		list_add_tail(&accel_dev->list, &accel_table);
-		accel_dev->accel_id = num_devices++;
-
+		accel_dev->accel_id = adf_find_free_id();
+		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+			ret = -EFAULT;
+			goto unlock;
+		}
+		num_devices++;
 		map = kzalloc(sizeof(*map), GFP_KERNEL);
 		if (!map) {
 			ret = -ENOMEM;
@@ -236,8 +256,13 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
 			ret = -ENOMEM;
 			goto unlock;
 		}
-
-		accel_dev->accel_id = num_devices++;
+		accel_dev->accel_id = adf_find_free_id();
+		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+			kfree(map);
+			ret = -EFAULT;
+			goto unlock;
+		}
+		num_devices++;
 		list_add_tail(&accel_dev->list, &accel_table);
 		map->bdf = adf_get_vf_num(accel_dev);
 		map->id = accel_dev->accel_id;
@@ -271,6 +296,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
 {
 	mutex_lock(&table_lock);
 	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		id_map[accel_dev->accel_id] = 0;
 		num_devices--;
 	} else if (accel_dev->is_vf && pf) {
 		struct vf_id_map *map, *next;

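adf_find_free_id() above linearly scans a byte map so that accelerator ids freed by hot-unplugged devices can be reused rather than handing out num_devices++ forever. The same bookkeeping is sometimes written with the kernel bitmap helpers; a hypothetical equivalent (not the code the driver ships) looks like:

	static DECLARE_BITMAP(id_map, ADF_MAX_DEVICES);

	static unsigned int adf_find_free_id(void)
	{
		unsigned int i = find_first_zero_bit(id_map, ADF_MAX_DEVICES);

		/* find_first_zero_bit() returns ADF_MAX_DEVICES when full. */
		if (i < ADF_MAX_DEVICES)
			set_bit(i, id_map);
		return i;
	}

The caller would then treat any value >= ADF_MAX_DEVICES as "table full", mirroring the > ADF_MAX_DEVICES check added to adf_devmgr_add_dev().
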
+ 1 - 7
drivers/crypto/qat/qat_common/adf_hw_arbiter.c

@@ -45,6 +45,7 @@
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 #include "adf_accel_devices.h"
+#include "adf_common_drv.h"
 #include "adf_transport_internal.h"
 
 #define ADF_ARB_NUM 4
@@ -124,19 +125,12 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
 }
 EXPORT_SYMBOL_GPL(adf_init_arb);
 
-/**
- * adf_update_ring_arb() - update ring arbitration rgister
- * @accel_dev:  Pointer to ring data.
- *
- * Function enables or disables rings for/from arbitration.
- */
 void adf_update_ring_arb(struct adf_etr_ring_data *ring)
 {
 	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
 				   ring->bank->bank_number,
 				   ring->bank->ring_mask & 0xFF);
 }
-EXPORT_SYMBOL_GPL(adf_update_ring_arb);
 
 void adf_exit_arb(struct adf_accel_dev *accel_dev)
 {

+ 1 - 20
drivers/crypto/qat/qat_common/adf_init.c

@@ -62,15 +62,6 @@ static void adf_service_add(struct service_hndl *service)
 	mutex_unlock(&service_lock);
 }
 
-/**
- * adf_service_register() - Register acceleration service in the accel framework
- * @service:    Pointer to the service
- *
- * Function adds the acceleration service to the acceleration framework.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
 int adf_service_register(struct service_hndl *service)
 {
 	service->init_status = 0;
@@ -78,7 +69,6 @@ int adf_service_register(struct service_hndl *service)
 	adf_service_add(service);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(adf_service_register);
 
 static void adf_service_remove(struct service_hndl *service)
 {
@@ -87,15 +77,6 @@ static void adf_service_remove(struct service_hndl *service)
 	mutex_unlock(&service_lock);
 }
 
-/**
- * adf_service_unregister() - Unregister acceleration service from the framework
- * @service:    Pointer to the service
- *
- * Function remove the acceleration service from the acceleration framework.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
 int adf_service_unregister(struct service_hndl *service)
 {
 	if (service->init_status || service->start_status) {
@@ -105,7 +86,6 @@ int adf_service_unregister(struct service_hndl *service)
 	adf_service_remove(service);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(adf_service_unregister);
 
 /**
  * adf_dev_init() - Init data structures and services for the given accel device
@@ -366,6 +346,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
 
 	hw_data->disable_iov(accel_dev);
 	adf_cleanup_etr_data(accel_dev);
+	adf_dev_restore(accel_dev);
 }
 EXPORT_SYMBOL_GPL(adf_dev_shutdown);
 

+ 30 - 14
drivers/crypto/qat/qat_dh895xcc/adf_isr.c → drivers/crypto/qat/qat_common/adf_isr.c

@@ -51,15 +51,13 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include <adf_cfg_strings.h>
-#include <adf_cfg_common.h>
-#include <adf_transport_access_macros.h>
-#include <adf_transport_internal.h>
-#include "adf_drv.h"
-#include "adf_dh895xcc_hw_data.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
 
 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 {
@@ -109,14 +107,16 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
 #ifdef CONFIG_PCI_IOV
 	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
 	if (accel_dev->pf.vf_info) {
-		void __iomem *pmisc_bar_addr =
-		    (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+		struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+		void __iomem *pmisc_bar_addr = pmisc->virt_addr;
 		u32 vf_mask;
 
 		/* Get the interrupt sources triggered by VFs */
-		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
+		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
 			    0x0000FFFF) << 16) |
-			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
+			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
 			    0x01FFFE00) >> 9);
 
 		if (vf_mask) {
@@ -301,6 +301,12 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
 	}
 }
 
+/**
+ * adf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device.
+ */
 void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
 {
 	adf_free_irqs(accel_dev);
@@ -308,7 +314,16 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
 	adf_disable_msix(&accel_dev->accel_pci_dev);
 	adf_isr_free_msix_entry_table(accel_dev);
 }
-
+EXPORT_SYMBOL_GPL(adf_isr_resource_free);
+
+/**
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
 int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
 {
 	int ret;
@@ -330,3 +345,4 @@ err_out:
 	adf_isr_resource_free(accel_dev);
 	return -EFAULT;
 }
+EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);

+ 0 - 23
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c

@@ -45,8 +45,6 @@
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
-#include <linux/pci.h>
-#include <linux/mutex.h>
 #include <linux/delay.h>
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
@@ -58,12 +56,6 @@
 #define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
 #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
 
-/**
- * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function enables PF to VF interrupts
- */
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -73,14 +65,7 @@ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 
 	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
 }
-EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
 
-/**
- * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function disables PF to VF interrupts
- */
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -90,7 +75,6 @@ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 
 	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
 }
-EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
 
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				 u32 vf_mask)
@@ -116,12 +100,6 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 	}
 }
 
-/**
- * adf_disable_pf2vf_interrupts() - Disable VF to PF interrupts
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function disables VF to PF interrupts
- */
 void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
 {
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
@@ -144,7 +122,6 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
 		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
 	}
 }
-EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
 
 static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 {

+ 8 - 20
drivers/crypto/qat/qat_common/adf_transport.c

@@ -122,7 +122,7 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
 		return -EAGAIN;
 	}
 	spin_lock_bh(&ring->lock);
-	memcpy(ring->base_addr + ring->tail, msg,
+	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
 	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
 
 	ring->tail = adf_modulo(ring->tail +
@@ -137,23 +137,22 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
 static int adf_handle_response(struct adf_etr_ring_data *ring)
 {
 	uint32_t msg_counter = 0;
-	uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);
+	uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
 
 	while (*msg != ADF_RING_EMPTY_SIG) {
 		ring->callback((uint32_t *)msg);
+		atomic_dec(ring->inflights);
 		*msg = ADF_RING_EMPTY_SIG;
 		ring->head = adf_modulo(ring->head +
 					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
 					ADF_RING_SIZE_MODULO(ring->ring_size));
 		msg_counter++;
-		msg = (uint32_t *)(ring->base_addr + ring->head);
+		msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
 	}
-	if (msg_counter > 0) {
+	if (msg_counter > 0)
 		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
 				    ring->bank->bank_number,
 				    ring->ring_number, ring->head);
-		atomic_sub(msg_counter, ring->inflights);
-	}
 	return 0;
 }
 
@@ -342,27 +341,15 @@ static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
 	}
 }
 
-/**
- * adf_response_handler() - Bottom half handler response handler
- * @bank_addr:  Address of a ring bank for with the BH was scheduled.
- *
- * Function is the bottom half handler for the response from acceleration
- * device. There is one handler for every ring bank. Function checks all
- * communication rings in the bank.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_response_handler(unsigned long bank_addr)
+void adf_response_handler(uintptr_t bank_addr)
 {
 	struct adf_etr_bank_data *bank = (void *)bank_addr;
 
-	/* Handle all the responses nad reenable IRQs */
+	/* Handle all the responses and reenable IRQs */
 	adf_ring_response_handler(bank);
 	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
 				   bank->irq_mask);
 }
-EXPORT_SYMBOL_GPL(adf_response_handler);
 
 static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
 				  const char *section, const char *format,
@@ -447,6 +434,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
 		goto err;
 	}
 
+	WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
 	WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
 	return 0;
 err:

+ 5 - 0
drivers/crypto/qat/qat_common/adf_transport_access_macros.h

@@ -50,12 +50,14 @@
 #include "adf_accel_devices.h"
 #define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
 #define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK 0xFFFF
 #define ADF_RING_CSR_RING_CONFIG 0x000
 #define ADF_RING_CSR_RING_LBASE 0x040
 #define ADF_RING_CSR_RING_UBASE 0x080
 #define ADF_RING_CSR_RING_HEAD 0x0C0
 #define ADF_RING_CSR_RING_TAIL 0x100
 #define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_FLAG	0x170
 #define ADF_RING_CSR_INT_SRCSEL 0x174
 #define ADF_RING_CSR_INT_SRCSEL_2 0x178
 #define ADF_RING_CSR_INT_COL_EN 0x17C
@@ -144,6 +146,9 @@ do { \
 #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
 	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
 		ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+			ADF_RING_CSR_INT_FLAG, value)
 #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
 do { \
 	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \

+ 1 - 1
drivers/crypto/qat/qat_common/adf_transport_internal.h

@@ -91,7 +91,7 @@ struct adf_etr_data {
 	struct dentry *debug;
 };
 
-void adf_response_handler(unsigned long bank_addr);
+void adf_response_handler(uintptr_t bank_addr);
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);

+ 43 - 21
drivers/crypto/qat/qat_dh895xccvf/adf_isr.c → drivers/crypto/qat/qat_common/adf_vf_isr.c

@@ -51,16 +51,18 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include <adf_cfg_strings.h>
-#include <adf_cfg_common.h>
-#include <adf_transport_access_macros.h>
-#include <adf_transport_internal.h>
-#include <adf_pf2vf_msg.h>
-#include "adf_drv.h"
-#include "adf_dh895xccvf_hw_data.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_VINTSOU_OFFSET	0x204
+#define ADF_VINTSOU_BUN		BIT(0)
+#define ADF_VINTSOU_PF2VF	BIT(1)
 
 static int adf_enable_msi(struct adf_accel_dev *accel_dev)
 {
@@ -91,12 +93,14 @@ static void adf_disable_msi(struct adf_accel_dev *accel_dev)
 static void adf_pf2vf_bh_handler(void *data)
 {
 	struct adf_accel_dev *accel_dev = data;
-	void __iomem *pmisc_bar_addr =
-		(&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
 	u32 msg;
 
 	/* Read the message from PF */
-	msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET);
+	msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
 
 	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
 		/* Ignore legacy non-system (non-kernel) PF2VF messages */
@@ -124,8 +128,8 @@ static void adf_pf2vf_bh_handler(void *data)
 	}
 
 	/* To ack, clear the PF2VFINT bit */
-	msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT;
-	ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg);
+	msg &= ~BIT(0);
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
 
 	/* Re-enable PF2VF interrupts */
 	adf_enable_pf2vf_interrupts(accel_dev);
@@ -155,15 +159,17 @@ static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
 static irqreturn_t adf_isr(int irq, void *privdata)
 {
 	struct adf_accel_dev *accel_dev = privdata;
-	void __iomem *pmisc_bar_addr =
-		(&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
 	u32 v_int;
 
 	/* Read VF INT source CSR to determine the source of VF interrupt */
-	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET);
+	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
 
 	/* Check for PF2VF interrupt */
-	if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) {
+	if (v_int & ADF_VINTSOU_PF2VF) {
 		/* Disable PF to VF interrupt */
 		adf_disable_pf2vf_interrupts(accel_dev);
 
@@ -173,7 +179,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
 	}
 
 	/* Check bundle interrupt */
-	if (v_int & ADF_DH895XCC_VINTSOU_BUN) {
+	if (v_int & ADF_VINTSOU_BUN) {
 		struct adf_etr_data *etr_data = accel_dev->transport;
 		struct adf_etr_bank_data *bank = &etr_data->banks[0];
 
@@ -226,6 +232,12 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
 	tasklet_kill(&priv_data->banks[0].resp_handler);
 }
 
+/**
+ * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device virtual function.
+ */
 void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
 {
 	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
@@ -236,7 +248,16 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
 	adf_cleanup_pf2vf_bh(accel_dev);
 	adf_disable_msi(accel_dev);
 }
-
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
+
+/**
+ * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
 int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
 {
 	if (adf_enable_msi(accel_dev))
@@ -256,3 +277,4 @@ err_out:
 	adf_vf_isr_resource_free(accel_dev);
 	return -EFAULT;
 }
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);

+ 10 - 0
drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h

@@ -68,11 +68,21 @@ struct icp_qat_fw_loader_hal_handle {
 
 struct icp_qat_fw_loader_handle {
 	struct icp_qat_fw_loader_hal_handle *hal_handle;
+	struct pci_dev *pci_dev;
 	void *obj_handle;
+	void *sobj_handle;
+	bool fw_auth;
 	void __iomem *hal_sram_addr_v;
 	void __iomem *hal_cap_g_ctl_csr_addr_v;
 	void __iomem *hal_cap_ae_xfer_csr_addr_v;
 	void __iomem *hal_cap_ae_local_csr_addr_v;
 	void __iomem *hal_ep_csr_addr_v;
 };
+
+struct icp_firml_dram_desc {
+	void __iomem *dram_base_addr;
+	void *dram_base_addr_v;
+	dma_addr_t dram_bus_addr;
+	u64 dram_size;
+};
 #endif

+ 34 - 3
drivers/crypto/qat/qat_common/icp_qat_hal.h

@@ -81,6 +81,31 @@ enum hal_ae_csr {
 	LOCAL_CSR_STATUS = 0x180,
 };
 
+enum fcu_csr {
+	FCU_CONTROL           = 0x8c0,
+	FCU_STATUS            = 0x8c4,
+	FCU_STATUS1           = 0x8c8,
+	FCU_DRAM_ADDR_LO      = 0x8cc,
+	FCU_DRAM_ADDR_HI      = 0x8d0,
+	FCU_RAMBASE_ADDR_HI   = 0x8d4,
+	FCU_RAMBASE_ADDR_LO   = 0x8d8
+};
+
+enum fcu_cmd {
+	FCU_CTRL_CMD_NOOP  = 0,
+	FCU_CTRL_CMD_AUTH  = 1,
+	FCU_CTRL_CMD_LOAD  = 2,
+	FCU_CTRL_CMD_START = 3
+};
+
+enum fcu_sts {
+	FCU_STS_NO_STS    = 0,
+	FCU_STS_VERI_DONE = 1,
+	FCU_STS_LOAD_DONE = 2,
+	FCU_STS_VERI_FAIL = 3,
+	FCU_STS_LOAD_FAIL = 4,
+	FCU_STS_BUSY      = 5
+};
 #define UA_ECS                      (0x1 << 31)
 #define ACS_ABO_BITPOS              31
 #define ACS_ACNO                    0x7
@@ -98,6 +123,13 @@ enum hal_ae_csr {
 #define LCS_STATUS          (0x1)
 #define MMC_SHARE_CS_BITPOS         2
 #define GLOBAL_CSR                0xA00
+#define FCU_CTRL_AE_POS     0x8
+#define FCU_AUTH_STS_MASK   0x7
+#define FCU_STS_DONE_POS    0x9
+#define FCU_STS_AUTHFWLD_POS 0X8
+#define FCU_LOADED_AE_POS   0x16
+#define FW_AUTH_WAIT_PERIOD 10
+#define FW_AUTH_MAX_RETRY   300
 
 #define SET_CAP_CSR(handle, csr, val) \
 	ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
@@ -106,14 +138,14 @@ enum hal_ae_csr {
 #define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
 #define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
 #define AE_CSR(handle, ae) \
-	(handle->hal_cap_ae_local_csr_addr_v + \
+	((char __iomem *)handle->hal_cap_ae_local_csr_addr_v + \
 	((ae & handle->hal_handle->ae_mask) << 12))
 #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr))
 #define SET_AE_CSR(handle, ae, csr, val) \
 	ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
 #define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
 #define AE_XFER(handle, ae) \
-	(handle->hal_cap_ae_xfer_csr_addr_v + \
+	((char __iomem *)handle->hal_cap_ae_xfer_csr_addr_v + \
 	((ae & handle->hal_handle->ae_mask) << 12))
 #define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
 	((reg & 0xff) << 2))
@@ -121,5 +153,4 @@ enum hal_ae_csr {
 	ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
 #define SRAM_WRITE(handle, addr, val) \
 	ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
-#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr)
 #endif
 #endif

+ 158 - 7
drivers/crypto/qat/qat_common/icp_qat_uclo.h

@@ -47,32 +47,55 @@
 #ifndef __ICP_QAT_UCLO_H__
 #define __ICP_QAT_UCLO_H__
 
-#define ICP_QAT_AC_C_CPU_TYPE     0x00400000
+#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
+#define ICP_QAT_AC_C62X_DEV_TYPE   0x01000000
+#define ICP_QAT_AC_C3XXX_DEV_TYPE  0x02000000
 #define ICP_QAT_UCLO_MAX_AE       12
 #define ICP_QAT_UCLO_MAX_CTX      8
 #define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
 #define ICP_QAT_UCLO_MAX_USTORE   0x4000
 #define ICP_QAT_UCLO_MAX_XFER_REG 128
 #define ICP_QAT_UCLO_MAX_GPR_REG  128
-#define ICP_QAT_UCLO_MAX_NN_REG   128
 #define ICP_QAT_UCLO_MAX_LMEM_REG 1024
 #define ICP_QAT_UCLO_AE_ALL_CTX   0xff
 #define ICP_QAT_UOF_OBJID_LEN     8
 #define ICP_QAT_UOF_FID 0xc6c2
 #define ICP_QAT_UOF_MAJVER 0x4
 #define ICP_QAT_UOF_MINVER 0x11
-#define ICP_QAT_UOF_NN_MODE_NOTCARE   0xff
 #define ICP_QAT_UOF_OBJS        "UOF_OBJS"
 #define ICP_QAT_UOF_STRT        "UOF_STRT"
-#define ICP_QAT_UOF_GTID        "UOF_GTID"
 #define ICP_QAT_UOF_IMAG        "UOF_IMAG"
 #define ICP_QAT_UOF_IMEM        "UOF_IMEM"
-#define ICP_QAT_UOF_MSEG        "UOF_MSEG"
 #define ICP_QAT_UOF_LOCAL_SCOPE     1
 #define ICP_QAT_UOF_INIT_EXPR               0
 #define ICP_QAT_UOF_INIT_REG                1
 #define ICP_QAT_UOF_INIT_REG_CTX            2
 #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
+#define ICP_QAT_SUOF_OBJ_ID_LEN             8
+#define ICP_QAT_SUOF_FID  0x53554f46
+#define ICP_QAT_SUOF_MAJVER 0x0
+#define ICP_QAT_SUOF_MINVER 0x1
+#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN    (50 * sizeof(unsigned long long))
+#define ICP_QAT_SIMG_AE_INSTS_LEN       (0x4000 * sizeof(unsigned long long))
+#define ICP_QAT_CSS_FWSK_MODULUS_LEN    256
+#define ICP_QAT_CSS_FWSK_EXPONENT_LEN   4
+#define ICP_QAT_CSS_FWSK_PAD_LEN        252
+#define ICP_QAT_CSS_FWSK_PUB_LEN   (ICP_QAT_CSS_FWSK_MODULUS_LEN + \
+				    ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
+				    ICP_QAT_CSS_FWSK_PAD_LEN)
+#define ICP_QAT_CSS_SIGNATURE_LEN   256
+#define ICP_QAT_CSS_AE_IMG_LEN     (sizeof(struct icp_qat_simg_ae_mode) + \
+				    ICP_QAT_SIMG_AE_INIT_SEQ_LEN +         \
+				    ICP_QAT_SIMG_AE_INSTS_LEN)
+#define ICP_QAT_CSS_AE_SIMG_LEN    (sizeof(struct icp_qat_css_hdr) + \
+				    ICP_QAT_CSS_FWSK_PUB_LEN + \
+				    ICP_QAT_CSS_SIGNATURE_LEN + \
+				    ICP_QAT_CSS_AE_IMG_LEN)
+#define ICP_QAT_AE_IMG_OFFSET	   (sizeof(struct icp_qat_css_hdr) + \
+				    ICP_QAT_CSS_FWSK_MODULUS_LEN + \
+				    ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
+				    ICP_QAT_CSS_SIGNATURE_LEN)
+#define ICP_QAT_CSS_MAX_IMAGE_LEN   0x40000
 
 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
 #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
@@ -112,6 +135,11 @@ enum icp_qat_uof_regtype {
 	ICP_NEIGH_REL,
 };
 
+enum icp_qat_css_fwtype {
+	CSS_AE_FIRMWARE = 0,
+	CSS_MMP_FIRMWARE = 1
+};
+
 struct icp_qat_uclo_page {
 	struct icp_qat_uclo_encap_page *encap_page;
 	struct icp_qat_uclo_region *region;
@@ -235,7 +263,7 @@ struct icp_qat_uof_filechunkhdr {
 };
 
 struct icp_qat_uof_objhdr {
-	unsigned int cpu_type;
+	unsigned int ac_dev_type;
 	unsigned short min_cpu_ver;
 	unsigned short max_cpu_ver;
 	short max_chunks;
@@ -326,7 +354,7 @@ struct icp_qat_uof_image {
 	unsigned int img_name;
 	unsigned int ae_assigned;
 	unsigned int ctx_assigned;
-	unsigned int cpu_type;
+	unsigned int ac_dev_type;
 	unsigned int entry_address;
 	unsigned int fill_pattern[2];
 	unsigned int reloadable_size;
@@ -374,4 +402,127 @@ struct icp_qat_uof_batch_init {
 	unsigned int size;
 	struct icp_qat_uof_batch_init *next;
 };
+
+struct icp_qat_suof_img_hdr {
+	char          *simg_buf;
+	unsigned long simg_len;
+	char          *css_header;
+	char          *css_key;
+	char          *css_signature;
+	char          *css_simg;
+	unsigned long simg_size;
+	unsigned int  ae_num;
+	unsigned int  ae_mask;
+	unsigned int  fw_type;
+	unsigned long simg_name;
+	unsigned long appmeta_data;
+};
+
+struct icp_qat_suof_img_tbl {
+	unsigned int num_simgs;
+	struct icp_qat_suof_img_hdr *simg_hdr;
+};
+
+struct icp_qat_suof_handle {
+	unsigned int  file_id;
+	unsigned int  check_sum;
+	char          min_ver;
+	char          maj_ver;
+	char          fw_type;
+	char          *suof_buf;
+	unsigned int  suof_size;
+	char          *sym_str;
+	unsigned int  sym_size;
+	struct icp_qat_suof_img_tbl img_table;
+};
+
+struct icp_qat_fw_auth_desc {
+	unsigned int   img_len;
+	unsigned int   reserved;
+	unsigned int   css_hdr_high;
+	unsigned int   css_hdr_low;
+	unsigned int   img_high;
+	unsigned int   img_low;
+	unsigned int   signature_high;
+	unsigned int   signature_low;
+	unsigned int   fwsk_pub_high;
+	unsigned int   fwsk_pub_low;
+	unsigned int   img_ae_mode_data_high;
+	unsigned int   img_ae_mode_data_low;
+	unsigned int   img_ae_init_data_high;
+	unsigned int   img_ae_init_data_low;
+	unsigned int   img_ae_insts_high;
+	unsigned int   img_ae_insts_low;
+};
+
+struct icp_qat_auth_chunk {
+	struct icp_qat_fw_auth_desc fw_auth_desc;
+	u64 chunk_size;
+	u64 chunk_bus_addr;
+};
+
+struct icp_qat_css_hdr {
+	unsigned int module_type;
+	unsigned int header_len;
+	unsigned int header_ver;
+	unsigned int module_id;
+	unsigned int module_vendor;
+	unsigned int date;
+	unsigned int size;
+	unsigned int key_size;
+	unsigned int module_size;
+	unsigned int exponent_size;
+	unsigned int fw_type;
+	unsigned int reserved[21];
+};
+
+struct icp_qat_simg_ae_mode {
+	unsigned int     file_id;
+	unsigned short   maj_ver;
+	unsigned short   min_ver;
+	unsigned int     dev_type;
+	unsigned short   devmax_ver;
+	unsigned short   devmin_ver;
+	unsigned int     ae_mask;
+	unsigned int     ctx_enables;
+	char             fw_type;
+	char             ctx_mode;
+	char             nn_mode;
+	char             lm0_mode;
+	char             lm1_mode;
+	char             scs_mode;
+	char             lm2_mode;
+	char             lm3_mode;
+	char             tindex_mode;
+	unsigned char    reserved[7];
+	char             simg_name[256];
+	char             appmeta_data[256];
+};
+
+struct icp_qat_suof_filehdr {
+	unsigned int     file_id;
+	unsigned int     check_sum;
+	char             min_ver;
+	char             maj_ver;
+	char             fw_type;
+	char             reserved;
+	unsigned short   max_chunks;
+	unsigned short   num_chunks;
+};
+
+struct icp_qat_suof_chunk_hdr {
+	char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
+	u64 offset;
+	u64 size;
+};
+
+struct icp_qat_suof_strtable {
+	unsigned int tab_length;
+	unsigned int strings;
+};
+
+struct icp_qat_suof_objhdr {
+	unsigned int img_length;
+	unsigned int reserved;
+};
 #endif

+ 108 - 28
drivers/crypto/qat/qat_common/qat_crypto.c

@@ -49,6 +49,7 @@
 #include "adf_accel_devices.h"
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
 #include "adf_common_drv.h"
 #include "adf_transport.h"
 #include "adf_transport.h"
+#include "adf_transport_access_macros.h"
 #include "adf_cfg.h"
 #include "adf_cfg.h"
 #include "adf_cfg_strings.h"
 #include "adf_cfg_strings.h"
 #include "qat_crypto.h"
 #include "qat_crypto.h"
@@ -66,13 +67,10 @@ void qat_crypto_put_instance(struct qat_crypto_instance *inst)
 
 
 static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
 static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
 {
 {
-	struct qat_crypto_instance *inst;
-	struct list_head *list_ptr, *tmp;
+	struct qat_crypto_instance *inst, *tmp;
 	int i;
 	int i;
 
 
-	list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
-		inst = list_entry(list_ptr, struct qat_crypto_instance, list);
-
+	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
 		for (i = 0; i < atomic_read(&inst->refctr); i++)
 		for (i = 0; i < atomic_read(&inst->refctr); i++)
 			qat_crypto_put_instance(inst);
 			qat_crypto_put_instance(inst);
 
 
@@ -88,7 +86,7 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
 		if (inst->pke_rx)
 		if (inst->pke_rx)
 			adf_remove_ring(inst->pke_rx);
 			adf_remove_ring(inst->pke_rx);
 
 
-		list_del(list_ptr);
+		list_del(&inst->list);
 		kfree(inst);
 		kfree(inst);
 	}
 	}
 	return 0;
 	return 0;
@@ -96,17 +94,13 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
 
 
 struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 {
 {
-	struct adf_accel_dev *accel_dev = NULL;
-	struct qat_crypto_instance *inst = NULL;
-	struct list_head *itr;
+	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
+	struct qat_crypto_instance *inst = NULL, *tmp_inst;
 	unsigned long best = ~0;
 	unsigned long best = ~0;
 
 
-	list_for_each(itr, adf_devmgr_get_head()) {
-		struct adf_accel_dev *tmp_dev;
+	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
 		unsigned long ctr;
 		unsigned long ctr;
 
 
-		tmp_dev = list_entry(itr, struct adf_accel_dev, list);
-
 		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
 		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
 		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
 		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
 		    adf_dev_started(tmp_dev) &&
 		    adf_dev_started(tmp_dev) &&
@@ -118,19 +112,16 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 			}
 			}
 		}
 		}
 	}
 	}
-	if (!accel_dev)
-		pr_info("QAT: Could not find a device on node %d\n", node);
-
-	/* Get any started device */
-	list_for_each(itr, adf_devmgr_get_head()) {
-		struct adf_accel_dev *tmp_dev;
 
 
-		tmp_dev = list_entry(itr, struct adf_accel_dev, list);
-
-		if (adf_dev_started(tmp_dev) &&
-		    !list_empty(&tmp_dev->crypto_list)) {
-			accel_dev = tmp_dev;
-			break;
+	if (!accel_dev) {
+		pr_info("QAT: Could not find a device on node %d\n", node);
+		/* Get any started device */
+		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+			if (adf_dev_started(tmp_dev) &&
+			    !list_empty(&tmp_dev->crypto_list)) {
+				accel_dev = tmp_dev;
+				break;
+			}
 		}
 		}
 	}
 	}
 
 
@@ -138,11 +129,9 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 		return NULL;
 		return NULL;
 
 
 	best = ~0;
 	best = ~0;
-	list_for_each(itr, &accel_dev->crypto_list) {
-		struct qat_crypto_instance *tmp_inst;
+	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
 		unsigned long ctr;
 		unsigned long ctr;
 
 
-		tmp_inst = list_entry(itr, struct qat_crypto_instance, list);
 		ctr = atomic_read(&tmp_inst->refctr);
 		ctr = atomic_read(&tmp_inst->refctr);
 		if (best > ctr) {
 		if (best > ctr) {
 			inst = tmp_inst;
 			inst = tmp_inst;
@@ -159,6 +148,97 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 	return inst;
 }
 
+/**
+ * qat_crypto_dev_config() - create dev config required to create crypto inst.
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates device configuration required to create crypto instances
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+	int cpus = num_online_cpus();
+	int banks = GET_MAX_BANKS(accel_dev);
+	int instances = min(cpus, banks);
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	int i;
+	unsigned long val;
+
+	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+		goto err;
+	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+		goto err;
+	for (i = 0; i < instances; i++) {
+		val = i;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+			 i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+		val = 128;
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 512;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 0;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 2;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 8;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 10;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = ADF_COALESCING_DEF_TIME;
+		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+						key, (void *)&val, ADF_DEC))
+			goto err;
+	}
+
+	val = i;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					ADF_NUM_CY, (void *)&val, ADF_DEC))
+		goto err;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+	return 0;
+err:
+	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qat_crypto_dev_config);
+
 static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 {
 	int i;

+ 81 - 43
drivers/crypto/qat/qat_common/qat_hal.c

@@ -45,21 +45,22 @@
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 #include <linux/slab.h>
+#include <linux/delay.h>
 
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
 #include "icp_qat_hal.h"
 #include "icp_qat_uclo.h"
 
-#define BAD_REGADDR               0xffff
-#define MAX_RETRY_TIMES           10000
-#define INIT_CTX_ARB_VALUE        0x0
+#define BAD_REGADDR	       0xffff
+#define MAX_RETRY_TIMES	   10000
+#define INIT_CTX_ARB_VALUE	0x0
 #define INIT_CTX_ENABLE_VALUE     0x0
-#define INIT_PC_VALUE             0x0
+#define INIT_PC_VALUE	     0x0
 #define INIT_WAKEUP_EVENTS_VALUE  0x1
 #define INIT_SIG_EVENTS_VALUE     0x1
 #define INIT_CCENABLE_VALUE       0x2000
-#define RST_CSR_QAT_LSB           20
+#define RST_CSR_QAT_LSB	   20
 #define RST_CSR_AE_LSB		  0
 #define MC_TIMESTAMP_ENABLE       (0x1 << 7)
 
@@ -185,7 +186,7 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
 		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
 			return 0;
 	}
-	if (!times) {
+	if (times < 0) {
 		pr_err("QAT: wait_num_cycles time out\n");
 		return -EFAULT;
 	}
@@ -391,9 +392,6 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
 	unsigned int times = MAX_RETRY_TIMES;
 
 	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
-		if (!(handle->hal_handle->ae_mask & (1 << ae)))
-			continue;
-
 		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
 				  (unsigned int *)&base_cnt);
 		base_cnt &= 0xffff;
@@ -413,6 +411,20 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
 	return 0;
 }
 
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+			    unsigned int ae)
+{
+	unsigned int enable = 0, active = 0;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
+	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
+	if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
+	    (active & (1 << ACS_ABO_BITPOS)))
+		return 1;
+	else
+		return 0;
+}
+
 static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
 {
 	unsigned int misc_ctl;
@@ -425,8 +437,6 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
 			    (~MC_TIMESTAMP_ENABLE));
 
 	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
-		if (!(handle->hal_handle->ae_mask & (1 << ae)))
-			continue;
 		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
 		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
 	}
@@ -440,8 +450,9 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
 #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
 static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
 {
-	void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
-				 ESRAM_AUTO_INIT_CSR_OFFSET;
+	void __iomem *csr_addr =
+			(void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
+			ESRAM_AUTO_INIT_CSR_OFFSET);
 	unsigned int csr_val, times = 30;
 
 	csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -493,8 +504,6 @@ int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
 
 	/* Set undefined power-up/reset states to reasonable default values */
 	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
-		if (!(handle->hal_handle->ae_mask & (1 << ae)))
-			continue;
 		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
 				  INIT_CTX_ENABLE_VALUE);
 		qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
@@ -598,25 +607,31 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
 	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
 }
 
-static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
 {
 	unsigned char ae;
-	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
-	int times = MAX_RETRY_TIMES;
-	unsigned int csr_val = 0;
 	unsigned short reg;
-	unsigned int savctx = 0;
-	int ret = 0;
 
 	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
-		if (!(handle->hal_handle->ae_mask & (1 << ae)))
-			continue;
 		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
 			qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
 					     reg, 0);
 			qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
 					     reg, 0);
 		}
+	}
+}
+
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae;
+	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+	int times = MAX_RETRY_TIMES;
+	unsigned int csr_val = 0;
+	unsigned int savctx = 0;
+	int ret = 0;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
 		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
 		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
 		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
@@ -638,8 +653,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
 		qat_hal_enable_ctx(handle, ae, ctx_mask);
 	}
 	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
-		if (!(handle->hal_handle->ae_mask & (1 << ae)))
-			continue;
 		/* wait for AE to finish */
 		do {
 			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
@@ -667,10 +680,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
 	return 0;
 }
 
-#define ICP_DH895XCC_AE_OFFSET      0x20000
-#define ICP_DH895XCC_CAP_OFFSET     (ICP_DH895XCC_AE_OFFSET + 0x10000)
+#define ICP_QAT_AE_OFFSET	0x20000
+#define ICP_QAT_CAP_OFFSET       (ICP_QAT_AE_OFFSET + 0x10000)
 #define LOCAL_TO_XFER_REG_OFFSET    0x800
-#define ICP_DH895XCC_EP_OFFSET      0x3a000
+#define ICP_QAT_EP_OFFSET	0x3a000
 int qat_hal_init(struct adf_accel_dev *accel_dev)
 {
 	unsigned char ae;
@@ -687,15 +700,22 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
 	if (!handle)
 		return -ENOMEM;
 
-	handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr +
-						ICP_DH895XCC_CAP_OFFSET;
-	handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr +
-						ICP_DH895XCC_AE_OFFSET;
-	handle->hal_ep_csr_addr_v = misc_bar->virt_addr +
-				    ICP_DH895XCC_EP_OFFSET;
-	handle->hal_cap_ae_local_csr_addr_v =
-		handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
 	handle->hal_sram_addr_v = sram_bar->virt_addr;
+	handle->hal_cap_g_ctl_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_CAP_OFFSET);
+	handle->hal_cap_ae_xfer_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_AE_OFFSET);
+	handle->hal_ep_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_EP_OFFSET);
+	handle->hal_cap_ae_local_csr_addr_v =
+		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
+				 LOCAL_TO_XFER_REG_OFFSET);
+	handle->pci_dev = pci_info->pci_dev;
+	handle->fw_auth = (handle->pci_dev->device ==
+			   ADF_DH895XCC_PCI_DEVICE_ID) ? false : true;
 	handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
 	if (!handle->hal_handle)
 		goto out_hal_handle;
@@ -723,14 +743,16 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
 		dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
 		goto out_err;
 	}
-	if (qat_hal_clear_gpr(handle))
-		goto out_err;
+	qat_hal_clear_xfer(handle);
+	if (!handle->fw_auth) {
+		if (qat_hal_clear_gpr(handle))
+			goto out_err;
+	}
+
 	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
 	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
 		unsigned int csr_val = 0;
 
-		if (!(hw_data->ae_mask & (1 << ae)))
-			continue;
 		qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
 		csr_val |= 0x1;
 		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
@@ -756,15 +778,31 @@ void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
 void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
 		   unsigned int ctx_mask)
 {
-	qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
+	int retry = 0;
+	unsigned int fcu_sts = 0;
+
+	if (handle->fw_auth) {
+		SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START);
+		do {
+			msleep(FW_AUTH_WAIT_PERIOD);
+			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+			if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
+				return;
+		} while (retry++ < FW_AUTH_MAX_RETRY);
+		pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae,
+		       fcu_sts);
+	} else {
+		qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
 				 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
-	qat_hal_enable_ctx(handle, ae, ctx_mask);
+		qat_hal_enable_ctx(handle, ae, ctx_mask);
+	}
 }
 
 void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
 		  unsigned int ctx_mask)
 {
-	qat_hal_disable_ctx(handle, ae, ctx_mask);
+	if (!handle->fw_auth)
+		qat_hal_disable_ctx(handle, ae, ctx_mask);
 }
 
 void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,

+ 529 - 26
drivers/crypto/qat/qat_common/qat_uclo.c

@@ -47,7 +47,7 @@
 #include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/kernel.h>
-
+#include <linux/delay.h>
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
 #include "icp_qat_uclo.h"
@@ -119,10 +119,10 @@ static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
 {
 	if ((!str_table->table_len) || (str_offset > str_table->table_len))
 		return NULL;
-	return (char *)(((unsigned long)(str_table->strings)) + str_offset);
+	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
 }
 
-static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
+static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
 {
 	int maj = hdr->maj_ver & 0xff;
 	int min = hdr->min_ver & 0xff;
@@ -139,6 +139,31 @@ static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
 	return 0;
 }
 
+static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
+{
+	int maj = suof_hdr->maj_ver & 0xff;
+	int min = suof_hdr->min_ver & 0xff;
+
+	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
+		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+		return -EINVAL;
+	}
+	if (suof_hdr->fw_type != 0) {
+		pr_err("QAT: unsupported firmware type\n");
+		return -EINVAL;
+	}
+	if (suof_hdr->num_chunks <= 0x1) {
+		pr_err("QAT: SUOF chunk amount is incorrect\n");
+		return -EINVAL;
+	}
+	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
+		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
+		       maj, min);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
 				      unsigned int addr, unsigned int *val,
 				      unsigned int num_in_bytes)
@@ -275,7 +300,7 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
 	unsigned int i, flag = 0;
 
 	mem_val_attr =
-		(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
 		sizeof(struct icp_qat_uof_initmem));
 
 	init_header = *init_tab_base;
@@ -425,8 +450,8 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
 			if (qat_uclo_init_ae_memory(handle, initmem))
 				return -EINVAL;
 		}
-		initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
-			(unsigned long)initmem +
+		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
+			(uintptr_t)initmem +
 			sizeof(struct icp_qat_uof_initmem)) +
 			(sizeof(struct icp_qat_uof_memvar_attr) *
 			initmem->val_attr_num));
@@ -454,7 +479,7 @@ static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
 	int i;
 	struct icp_qat_uof_chunkhdr *chunk_hdr =
 	    (struct icp_qat_uof_chunkhdr *)
-	    ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+	    ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
 
 	for (i = 0; i < obj_hdr->num_chunks; i++) {
 		if ((cur < (void *)&chunk_hdr[i]) &&
@@ -596,7 +621,7 @@ static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
 	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
 	for (i = 0; i < uword_block_tab->entry_num; i++)
 		page->uwblock[i].micro_words =
-		(unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
 }
 
 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
@@ -697,7 +722,7 @@ qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
 		memcpy(&str_table->table_len, obj_hdr->file_buff +
 		       chunk_hdr->offset, sizeof(str_table->table_len));
 		hdr_size = (char *)&str_table->strings - (char *)str_table;
-		str_table->strings = (unsigned long)obj_hdr->file_buff +
+		str_table->strings = (uintptr_t)obj_hdr->file_buff +
 					chunk_hdr->offset + hdr_size;
 		return str_table;
 	}
@@ -721,13 +746,31 @@ qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
 	}
 }
 
+static unsigned int
+qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
+{
+	switch (handle->pci_dev->device) {
+	case ADF_DH895XCC_PCI_DEVICE_ID:
+		return ICP_QAT_AC_895XCC_DEV_TYPE;
+	case ADF_C62X_PCI_DEVICE_ID:
+		return ICP_QAT_AC_C62X_DEV_TYPE;
+	case ADF_C3XXX_PCI_DEVICE_ID:
+		return ICP_QAT_AC_C3XXX_DEV_TYPE;
+	default:
+		pr_err("QAT: unsupported device 0x%x\n",
+		       handle->pci_dev->device);
+		return 0;
+	}
+}
+
 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
 {
 	unsigned int maj_ver, prod_type = obj_handle->prod_type;
 
-	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
-		pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n",
-		       obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
+	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
+		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
+		       prod_type);
 		return -EINVAL;
 	}
 	maj_ver = obj_handle->prod_rev & 0xff;
@@ -932,7 +975,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
 	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
 					     obj_handle->obj_hdr->file_buff;
 	obj_handle->uword_in_bytes = 6;
-	obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
+	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
 	obj_handle->prod_rev = PID_MAJOR_REV |
 			(PID_MINOR_REV & handle->hal_handle->revision_id);
 	if (qat_uclo_check_uof_compat(obj_handle)) {
@@ -969,23 +1012,435 @@ out_err:
 	return -EFAULT;
 }
 
-void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
-			void *addr_ptr, int mem_size)
+static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+				      struct icp_qat_suof_filehdr *suof_ptr,
+				      int suof_size)
 {
-	qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
+	unsigned int check_sum = 0;
+	unsigned int min_ver_offset = 0;
+	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+
+	suof_handle->file_id = ICP_QAT_SUOF_FID;
+	suof_handle->suof_buf = (char *)suof_ptr;
+	suof_handle->suof_size = suof_size;
+	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
+					      min_ver);
+	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
+					       min_ver_offset);
+	if (check_sum != suof_ptr->check_sum) {
+		pr_err("QAT: incorrect SUOF checksum\n");
+		return -EINVAL;
+	}
+	suof_handle->check_sum = suof_ptr->check_sum;
+	suof_handle->min_ver = suof_ptr->min_ver;
+	suof_handle->maj_ver = suof_ptr->maj_ver;
+	suof_handle->fw_type = suof_ptr->fw_type;
+	return 0;
 }
 
-int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
-			 void *addr_ptr, int mem_size)
+static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
+			      struct icp_qat_suof_img_hdr *suof_img_hdr,
+			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
 {
-	struct icp_qat_uof_filehdr *filehdr;
-	struct icp_qat_uclo_objhandle *objhdl;
+	struct icp_qat_simg_ae_mode *ae_mode;
+	struct icp_qat_suof_objhdr *suof_objhdr;
+
+	suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
+				   suof_chunk_hdr->offset +
+				   sizeof(*suof_objhdr));
+	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
+				  (suof_handle->suof_buf +
+				   suof_chunk_hdr->offset))->img_length;
+
+	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
+	suof_img_hdr->css_key = (suof_img_hdr->css_header +
+				 sizeof(struct icp_qat_css_hdr));
+	suof_img_hdr->css_signature = suof_img_hdr->css_key +
+				      ICP_QAT_CSS_FWSK_MODULUS_LEN +
+				      ICP_QAT_CSS_FWSK_EXPONENT_LEN;
+	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
+				 ICP_QAT_CSS_SIGNATURE_LEN;
+
+	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
+	suof_img_hdr->ae_mask = ae_mode->ae_mask;
+	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
+	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
+	suof_img_hdr->fw_type = ae_mode->fw_type;
+}
 
 
-	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
-		     (sizeof(handle->hal_handle->ae_mask) * 8));
+static void
+qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
+			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+	char **sym_str = (char **)&suof_handle->sym_str;
+	unsigned int *sym_size = &suof_handle->sym_size;
+	struct icp_qat_suof_strtable *str_table_obj;
+
+	*sym_size = *(unsigned int *)(uintptr_t)
+		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
+	*sym_str = (char *)(uintptr_t)
+		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
+		   sizeof(str_table_obj->tab_length));
+}
 
 
-	if (!handle || !addr_ptr || mem_size < 24)
+static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
+				      struct icp_qat_suof_img_hdr *img_hdr)
+{
+	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
+	unsigned int prod_rev, maj_ver, prod_type;
+
+	prod_type = qat_uclo_get_dev_type(handle);
+	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
+	prod_rev = PID_MAJOR_REV |
+			 (PID_MINOR_REV & handle->hal_handle->revision_id);
+	if (img_ae_mode->dev_type != prod_type) {
+		pr_err("QAT: incompatible product type %x\n",
+		       img_ae_mode->dev_type);
 		return -EINVAL;
+	}
+	maj_ver = prod_rev & 0xff;
+	if ((maj_ver > img_ae_mode->devmax_ver) ||
+	    (maj_ver < img_ae_mode->devmin_ver)) {
+		pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+
+	kfree(sobj_handle->img_table.simg_hdr);
+	sobj_handle->img_table.simg_hdr = NULL;
+	kfree(handle->sobj_handle);
+	handle->sobj_handle = NULL;
+}
+
+static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
+			      unsigned int img_id, unsigned int num_simgs)
+{
+	struct icp_qat_suof_img_hdr img_header;
+
+	if (img_id != num_simgs - 1) {
+		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
+		       sizeof(*suof_img_hdr));
+		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
+		       sizeof(*suof_img_hdr));
+		memcpy(&suof_img_hdr[img_id], &img_header,
+		       sizeof(*suof_img_hdr));
+	}
+}
+
+static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
+			     struct icp_qat_suof_filehdr *suof_ptr,
+			     int suof_size)
+{
+	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
+	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
+	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
+	unsigned int i = 0;
+	struct icp_qat_suof_img_hdr img_header;
+
+	if (!suof_ptr || (suof_size == 0)) {
+		pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+		return -EINVAL;
+	}
+	if (qat_uclo_check_suof_format(suof_ptr))
+		return -EINVAL;
+	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
+	if (ret)
+		return ret;
+	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
+			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
+
+	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
+	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
+
+	if (suof_handle->img_table.num_simgs != 0) {
+		suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs *
+				       sizeof(img_header), GFP_KERNEL);
+		if (!suof_img_hdr)
+			return -ENOMEM;
+		suof_handle->img_table.simg_hdr = suof_img_hdr;
+	}
+
+	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
+		qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
+				  &suof_chunk_hdr[1 + i]);
+		ret = qat_uclo_check_simg_compat(handle,
+						 &suof_img_hdr[i]);
+		if (ret)
+			return ret;
+		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
+			ae0_img = i;
+	}
+	qat_uclo_tail_img(suof_img_hdr, ae0_img,
+			  suof_handle->img_table.num_simgs);
+	return 0;
+}
+
+#define ADD_ADDR(high, low)  ((((uint64_t)high) << 32) + low)
+#define BITS_IN_DWORD 32
+
+static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
+			    struct icp_qat_fw_auth_desc *desc)
+{
+	unsigned int fcu_sts, retry = 0;
+	u64 bus_addr;
+
+	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
+			   - sizeof(struct icp_qat_auth_chunk);
+	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
+	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
+	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);
+
+	do {
+		msleep(FW_AUTH_WAIT_PERIOD);
+		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
+			goto auth_fail;
+		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
+			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
+				return 0;
+	} while (retry++ < FW_AUTH_MAX_RETRY);
+auth_fail:
+	pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+	       fcu_sts & FCU_AUTH_STS_MASK, retry);
+	return -EINVAL;
+}
+
+static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
+			       struct icp_firml_dram_desc *dram_desc,
+			       unsigned int size)
+{
+	void *vptr;
+	dma_addr_t ptr;
+
+	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
+				  size, &ptr, GFP_KERNEL);
+	if (!vptr)
+		return -ENOMEM;
+	dram_desc->dram_base_addr_v = vptr;
+	dram_desc->dram_bus_addr = ptr;
+	dram_desc->dram_size = size;
+	return 0;
+}
+
+static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
+			       struct icp_firml_dram_desc *dram_desc)
+{
+	dma_free_coherent(&handle->pci_dev->dev,
+			  (size_t)(dram_desc->dram_size),
+			  (dram_desc->dram_base_addr_v),
+			  dram_desc->dram_bus_addr);
+	memset(dram_desc, 0, sizeof(*dram_desc));
+}
+
+static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
+				   struct icp_qat_fw_auth_desc **desc)
+{
+	struct icp_firml_dram_desc dram_desc;
+
+	dram_desc.dram_base_addr_v = *desc;
+	dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
+				   (*desc))->chunk_bus_addr;
+	dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
+			       (*desc))->chunk_size;
+	qat_uclo_simg_free(handle, &dram_desc);
+}
+
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+				char *image, unsigned int size,
+				struct icp_qat_fw_auth_desc **desc)
+{
+	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+	struct icp_qat_fw_auth_desc *auth_desc;
+	struct icp_qat_auth_chunk *auth_chunk;
+	u64 virt_addr,  bus_addr, virt_base;
+	unsigned int length, simg_offset = sizeof(*auth_chunk);
+	struct icp_firml_dram_desc img_desc;
+
+	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
+		pr_err("QAT: error, input image size overflow %d\n", size);
+		return -EINVAL;
+	}
+	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
+		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
+		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
+	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
+		pr_err("QAT: error, allocate continuous dram fail\n");
+		return -ENOMEM;
+	}
+
+	auth_chunk = img_desc.dram_base_addr_v;
+	auth_chunk->chunk_size = img_desc.dram_size;
+	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
+	bus_addr  = img_desc.dram_bus_addr + simg_offset;
+	auth_desc = img_desc.dram_base_addr_v;
+	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->css_hdr_low = (unsigned int)bus_addr;
+	virt_addr = virt_base;
+
+	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+	/* pub key */
+	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
+			   sizeof(*css_hdr);
+	virt_addr = virt_addr + sizeof(*css_hdr);
+
+	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + sizeof(*css_hdr)),
+	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
+	/* padding */
+	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
+	       0, ICP_QAT_CSS_FWSK_PAD_LEN);
+
+	/* exponent */
+	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
+	       ICP_QAT_CSS_FWSK_PAD_LEN),
+	       (void *)(image + sizeof(*css_hdr) +
+			ICP_QAT_CSS_FWSK_MODULUS_LEN),
+	       sizeof(unsigned int));
+
+	/* signature */
+	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
+			    auth_desc->fwsk_pub_low) +
+		   ICP_QAT_CSS_FWSK_PUB_LEN;
+	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
+	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->signature_low = (unsigned int)bus_addr;
+
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + sizeof(*css_hdr) +
+	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
+	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
+	       ICP_QAT_CSS_SIGNATURE_LEN);
+
+	bus_addr = ADD_ADDR(auth_desc->signature_high,
+			    auth_desc->signature_low) +
+		   ICP_QAT_CSS_SIGNATURE_LEN;
+	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
+
+	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->img_low = (unsigned int)bus_addr;
+	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
+	       auth_desc->img_len);
+	virt_addr = virt_base;
+	/* AE firmware */
+	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
+	    CSS_AE_FIRMWARE) {
+		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
+		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
+		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
+				    auth_desc->img_ae_mode_data_low) +
+			   sizeof(struct icp_qat_simg_ae_mode);
+
+		auth_desc->img_ae_init_data_high = (unsigned int)
+						 (bus_addr >> BITS_IN_DWORD);
+		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+		auth_desc->img_ae_insts_high = (unsigned int)
+					     (bus_addr >> BITS_IN_DWORD);
+		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+	} else {
+		auth_desc->img_ae_insts_high = auth_desc->img_high;
+		auth_desc->img_ae_insts_low = auth_desc->img_low;
+	}
+	*desc = auth_desc;
+	return 0;
+}
+
+static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
+			    struct icp_qat_fw_auth_desc *desc)
+{
+	unsigned int i;
+	unsigned int fcu_sts;
+	struct icp_qat_simg_ae_mode *virt_addr;
+	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
+
+	virt_addr = (void *)((uintptr_t)desc +
+		     sizeof(struct icp_qat_auth_chunk) +
+		     sizeof(struct icp_qat_css_hdr) +
+		     ICP_QAT_CSS_FWSK_PUB_LEN +
+		     ICP_QAT_CSS_SIGNATURE_LEN);
+	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
+		int retry = 0;
+
+		if (!((virt_addr->ae_mask >> i) & 0x1))
+			continue;
+		if (qat_hal_check_ae_active(handle, i)) {
+			pr_err("QAT: AE %d is active\n", i);
+			return -EINVAL;
+		}
+		SET_CAP_CSR(handle, FCU_CONTROL,
+			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
+
+		do {
+			msleep(FW_AUTH_WAIT_PERIOD);
+			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
+			    FCU_STS_LOAD_DONE) &&
+			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
+				break;
+		} while (retry++ < FW_AUTH_MAX_RETRY);
+		if (retry > FW_AUTH_MAX_RETRY) {
+			pr_err("QAT: firmware load failed timeout %x\n", retry);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
+				 void *addr_ptr, int mem_size)
+{
+	struct icp_qat_suof_handle *suof_handle;
+
+	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
+	if (!suof_handle)
+		return -ENOMEM;
+	handle->sobj_handle = suof_handle;
+	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
+		qat_uclo_del_suof(handle);
+		pr_err("QAT: map SUOF failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+		       void *addr_ptr, int mem_size)
+{
+	struct icp_qat_fw_auth_desc *desc = NULL;
+	int status = 0;
+
+	if (handle->fw_auth) {
+		if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
+			status = qat_uclo_auth_fw(handle, desc);
+		qat_uclo_ummap_auth_fw(handle, &desc);
+	} else {
+		if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
+			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
+			return -EINVAL;
+		}
+		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
+	}
+	return status;
+}
+
+static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+				void *addr_ptr, int mem_size)
+{
+	struct icp_qat_uof_filehdr *filehdr;
+	struct icp_qat_uclo_objhandle *objhdl;
+
 	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
 	if (!objhdl)
 		return -ENOMEM;
@@ -993,7 +1448,7 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
 	if (!objhdl->obj_buf)
 		goto out_objbuf_err;
 	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
-	if (qat_uclo_check_format(filehdr))
+	if (qat_uclo_check_uof_format(filehdr))
 		goto out_objhdr_err;
 	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
 					     ICP_QAT_UOF_OBJS);
@@ -1016,11 +1471,27 @@ out_objbuf_err:
 	return -ENOMEM;
 }
 
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+		     void *addr_ptr, int mem_size)
+{
+	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+		     (sizeof(handle->hal_handle->ae_mask) * 8));
+
+	if (!handle || !addr_ptr || mem_size < 24)
+		return -EINVAL;
+
+	return (handle->fw_auth) ?
+			qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
+			qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
+}
+
 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
 {
 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 	unsigned int a;
 
+	if (handle->sobj_handle)
+		qat_uclo_del_suof(handle);
 	if (!obj_handle)
 		return;
 
@@ -1055,7 +1526,7 @@ static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
 		    encap_page->uwblock[i].words_num - 1) {
 			raddr -= encap_page->uwblock[i].start_addr;
 			raddr *= obj_handle->uword_in_bytes;
-			memcpy(&uwrd, (void *)(((unsigned long)
+			memcpy(&uwrd, (void *)(((uintptr_t)
 			       encap_page->uwblock[i].micro_words) + raddr),
 			       obj_handle->uword_in_bytes);
 			uwrd = uwrd & 0xbffffffffffull;
@@ -1147,7 +1618,33 @@ static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
 	}
 }
 
-int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int i;
+	struct icp_qat_fw_auth_desc *desc = NULL;
+	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+
+	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+		if (qat_uclo_map_auth_fw(handle,
+					 (char *)simg_hdr[i].simg_buf,
+					 (unsigned int)
+					 (simg_hdr[i].simg_len),
+					 &desc))
+			goto wr_err;
+		if (qat_uclo_auth_fw(handle, desc))
+			goto wr_err;
+		if (qat_uclo_load_fw(handle, desc))
+			goto wr_err;
+		qat_uclo_ummap_auth_fw(handle, &desc);
+	}
+	return 0;
+wr_err:
+	qat_uclo_ummap_auth_fw(handle, &desc);
+	return -EINVAL;
+}
+
+static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
 {
 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 	unsigned int i;
@@ -1164,3 +1661,9 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
 	}
 	return 0;
 }
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
+				   qat_uclo_wr_uof_img(handle);
+}

+ 1 - 3
drivers/crypto/qat/qat_dh895xcc/Makefile

@@ -1,5 +1,3 @@
 ccflags-y := -I$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
-qat_dh895xcc-objs := adf_drv.o \
-		adf_isr.o \
-		adf_dh895xcc_hw_data.o
+qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o

+ 2 - 3
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c

@@ -48,7 +48,6 @@
 #include <adf_pf2vf_msg.h>
 #include <adf_common_drv.h>
 #include "adf_dh895xcc_hw_data.h"
-#include "adf_drv.h"
 
 /* Worker thread to service arbiter mappings based on dev SKUs */
 static const uint32_t thrd_to_arb_map_sku4[] = {
@@ -143,8 +142,8 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
 	return DEV_SKU_UNKNOWN;
 }
 
-void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
-			     uint32_t const **arb_map_config)
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
 {
 	switch (accel_dev->accel_pci_dev.sku) {
 	case DEV_SKU_1:

+ 4 - 5
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h

@@ -53,7 +53,6 @@
 #define ADF_DH895XCC_ETR_BAR 2
 #define ADF_DH895XCC_RX_RINGS_OFFSET 8
 #define ADF_DH895XCC_TX_RINGS_MASK 0xFF
-#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
 #define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
 #define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
 #define ADF_DH895XCC_FUSECTL_SKU_1 0x0
@@ -65,7 +64,6 @@
 #define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
 #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
 #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
-#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C
 #define ADF_DH895XCC_ETR_MAX_BANKS 32
 #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
 #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
@@ -80,11 +78,12 @@
 #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
 #define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
 
-#define ADF_DH895XCC_ERRSOU3	(0x3A000 + 0x00C)
-#define ADF_DH895XCC_ERRSOU5	(0x3A000 + 0x0D8)
 #define ADF_DH895XCC_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
 #define ADF_DH895XCC_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
 /* FW names */
 #define ADF_DH895XCC_FW "qat_895xcc.bin"
-#define ADF_DH895XCC_MMP "qat_mmp.bin"
+#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
 #endif

+ 6 - 97
drivers/crypto/qat/qat_dh895xcc/adf_drv.c

@@ -60,11 +60,7 @@
 #include <adf_accel_devices.h>
 #include <adf_common_drv.h>
 #include <adf_cfg.h>
-#include <adf_transport_access_macros.h>
 #include "adf_dh895xcc_hw_data.h"
-#include "adf_drv.h"
-
-static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;
 
 #define ADF_SYSTEM_DEVICE(device_id) \
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
@@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev);
 
 static struct pci_driver adf_driver = {
 	.id_table = adf_pci_tbl,
-	.name = adf_driver_name,
+	.name = ADF_DH895XCC_DEVICE_NAME,
 	.probe = adf_probe,
 	.remove = adf_remove,
 	.sriov_configure = adf_sriov_configure,
@@ -120,87 +116,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 	adf_devmgr_rm_dev(accel_dev, NULL);
 }
 
-static int adf_dev_configure(struct adf_accel_dev *accel_dev)
-{
-	int cpus = num_online_cpus();
-	int banks = GET_MAX_BANKS(accel_dev);
-	int instances = min(cpus, banks);
-	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-	int i;
-	unsigned long val;
-
-	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
-		goto err;
-	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
-		goto err;
-	for (i = 0; i < instances; i++) {
-		val = i;
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
-			 i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
-		val = 128;
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
-		val = 512;
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
-		val = 0;
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
-		val = 2;
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
-		val = 8;
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
-		val = 10;
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
-		val = ADF_COALESCING_DEF_TIME;
-		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
-		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
-						key, (void *)&val, ADF_DEC))
-			goto err;
-	}
-
-	val = i;
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-					ADF_NUM_CY, (void *)&val, ADF_DEC))
-		goto err;
-
-	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-	return 0;
-err:
-	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
-	return -EINVAL;
-}
-
 static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct adf_accel_dev *accel_dev;
@@ -253,15 +168,9 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	accel_dev->hw_device = hw_data;
-	switch (ent->device) {
-	case ADF_DH895XCC_PCI_DEVICE_ID:
-		adf_init_hw_data_dh895xcc(accel_dev->hw_device);
-		break;
-	default:
-		return -ENODEV;
-	}
+	adf_init_hw_data_dh895xcc(accel_dev->hw_device);
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-	pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
 			      &hw_data->fuses);
 
 	/* Get Accelerators and Accelerators Engines masks */
@@ -316,13 +225,13 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	}
 
-	if (pci_request_regions(pdev, adf_driver_name)) {
+	if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
 		ret = -EFAULT;
 		goto out_err_disable;
 	}
 
 	/* Read accelerator capabilities mask */
-	pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
 			      &hw_data->accel_capabilities_mask);
 
 	/* Find and map all the device's BARS */
@@ -357,7 +266,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_err_free_reg;
 	}
 
-	ret = adf_dev_configure(accel_dev);
+	ret = qat_crypto_dev_config(accel_dev);
 	if (ret)
 		goto out_err_free_reg;
 

+ 1 - 3
drivers/crypto/qat/qat_dh895xccvf/Makefile

@@ -1,5 +1,3 @@
 ccflags-y := -I$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
-qat_dh895xccvf-objs := adf_drv.o \
-		adf_isr.o \
-		adf_dh895xccvf_hw_data.o
+qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o

+ 3 - 2
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c

@@ -48,7 +48,6 @@
 #include <adf_pf2vf_msg.h>
 #include <adf_common_drv.h>
 #include "adf_dh895xccvf_hw_data.h"
-#include "adf_drv.h"
 
 static struct adf_hw_device_class dh895xcciov_class = {
 	.name = ADF_DH895XCCVF_DEVICE_NAME,
@@ -136,7 +135,6 @@ static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
 void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
 {
 	hw_data->dev_class = &dh895xcciov_class;
-	hw_data->instance_id = dh895xcciov_class.instances++;
 	hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
 	hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
 	hw_data->num_logical_accel = 1;
@@ -164,9 +162,12 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
 	hw_data->enable_ints = adf_vf_void_noop;
 	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
 	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
 }
 
 void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
 {
 	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
 }

+ 3 - 7
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h

@@ -56,13 +56,9 @@
 #define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
 #define ADF_DH895XCCIOV_ETR_BAR 0
 #define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
-
 #define ADF_DH895XCCIOV_PF2VF_OFFSET	0x200
-#define ADF_DH895XCC_PF2VF_PF2VFINT	BIT(0)
-
-#define ADF_DH895XCCIOV_VINTSOU_OFFSET	0x204
-#define ADF_DH895XCC_VINTSOU_BUN	BIT(0)
-#define ADF_DH895XCC_VINTSOU_PF2VF	BIT(1)
-
 #define ADF_DH895XCCIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
 #endif

+ 4 - 92
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c

@@ -60,11 +60,7 @@
 #include <adf_accel_devices.h>
 #include <adf_common_drv.h>
 #include <adf_cfg.h>
-#include <adf_transport_access_macros.h>
 #include "adf_dh895xccvf_hw_data.h"
-#include "adf_drv.h"
-
-static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME;
 
 #define ADF_SYSTEM_DEVICE(device_id) \
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
@@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev);
 
 static struct pci_driver adf_driver = {
 	.id_table = adf_pci_tbl,
-	.name = adf_driver_name,
+	.name = ADF_DH895XCCVF_DEVICE_NAME,
 	.probe = adf_probe,
 	.remove = adf_remove,
 };
@@ -121,83 +117,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 	adf_devmgr_rm_dev(accel_dev, pf);
 }
 
-static int adf_dev_configure(struct adf_accel_dev *accel_dev)
-{
-	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-	unsigned long val, bank = 0;
-
-	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
-		goto err;
-	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
-		goto err;
-
-	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0);
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
-					(void *)&bank, ADF_DEC))
-		goto err;
-
-	val = bank;
-	snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0);
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
-					(void *)&val, ADF_DEC))
-		goto err;
-
-	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0);
-
-	val = 128;
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
-					(void *)&val, ADF_DEC))
-		goto err;
-
-	val = 512;
-	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0);
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-					key, (void *)&val, ADF_DEC))
-		goto err;
-
-	val = 0;
-	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0);
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-					key, (void *)&val, ADF_DEC))
-		goto err;
-
-	val = 2;
-	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0);
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-					key, (void *)&val, ADF_DEC))
-		goto err;
-
-	val = 8;
-	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0);
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-					key, (void *)&val, ADF_DEC))
-		goto err;
-
-	val = 10;
-	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0);
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-					key, (void *)&val, ADF_DEC))
-			goto err;
-
-	val = ADF_COALESCING_DEF_TIME;
-	snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
-		 (int)bank);
-	if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
-					key, (void *)&val, ADF_DEC))
-		goto err;
-
-	val = 1;
-	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-					ADF_NUM_CY, (void *)&val, ADF_DEC))
-		goto err;
-
-	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-	return 0;
-err:
-	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
-	return -EINVAL;
-}
-
 static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct adf_accel_dev *accel_dev;
@@ -243,14 +162,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_err;
 	}
 	accel_dev->hw_device = hw_data;
-	switch (ent->device) {
-	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
-		adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
-		break;
-	default:
-		ret = -ENODEV;
-		goto out_err;
-	}
+	adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
 
 	/* Get Accelerators and Accelerators Engines masks */
 	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
@@ -295,7 +207,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	}
 
-	if (pci_request_regions(pdev, adf_driver_name)) {
+	if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
 		ret = -EFAULT;
 		goto out_err_disable;
 	}
@@ -322,7 +234,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Completion for VF2PF request/response message exchange */
 	init_completion(&accel_dev->vf.iov_msg_completion);
 
-	ret = adf_dev_configure(accel_dev);
+	ret = qat_crypto_dev_config(accel_dev);
 	if (ret)
 		goto out_err_free_reg;
 

+ 8 - 0
drivers/crypto/qce/ablkcipher.c

@@ -83,6 +83,14 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
 		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
 	else
 		rctx->dst_nents = rctx->src_nents;
+	if (rctx->src_nents < 0) {
+		dev_err(qce->dev, "Invalid numbers of src SG.\n");
+		return rctx->src_nents;
+	}
+	if (rctx->dst_nents < 0) {
+		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
+		return -rctx->dst_nents;
+	}
 
 	rctx->dst_nents += 1;
 

+ 5 - 0
drivers/crypto/qce/sha.c

@@ -92,6 +92,11 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
 	}
 
 	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (rctx->src_nents < 0) {
+		dev_err(qce->dev, "Invalid numbers of src SG.\n");
+		return rctx->src_nents;
+	}
+
 	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
 	if (ret < 0)
 		return ret;
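Several hunks in this pull (qce, sahara, talitos) add the same guard: sg_nents_for_len() may return a negative errno, so its result has to be checked before it is handed to dma_map_sg() or used as a copy count. The fragment below is only a minimal sketch of that pattern; map_src_sg() is an illustrative helper name, not a function from any of these drivers.

/* Sketch of the sg_nents_for_len() check added across these hunks;
 * map_src_sg() is an illustrative helper, not driver code. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_src_sg(struct device *dev, struct scatterlist *src,
		      unsigned int nbytes)
{
	int nents = sg_nents_for_len(src, nbytes);

	if (nents < 0) {	/* -EINVAL: the chain is shorter than nbytes */
		dev_err(dev, "Invalid number of src SG.\n");
		return nents;	/* propagate the negative errno */
	}

	if (!dma_map_sg(dev, src, nents, DMA_TO_DEVICE))
		return -ENOMEM;

	return nents;
}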

+ 3 - 0
drivers/crypto/rockchip/Makefile

@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
+rk_crypto-objs := rk3288_crypto.o \
+		  rk3288_crypto_ablkcipher.o \

+ 394 - 0
drivers/crypto/rockchip/rk3288_crypto.c

@@ -0,0 +1,394 @@
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+
+#include "rk3288_crypto.h"
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/reset.h>
+
+static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
+{
+	int err;
+
+	err = clk_prepare_enable(dev->sclk);
+	if (err) {
+		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
+			__func__, __LINE__);
+		goto err_return;
+	}
+	err = clk_prepare_enable(dev->aclk);
+	if (err) {
+		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
+			__func__, __LINE__);
+		goto err_aclk;
+	}
+	err = clk_prepare_enable(dev->hclk);
+	if (err) {
+		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
+			__func__, __LINE__);
+		goto err_hclk;
+	}
+	err = clk_prepare_enable(dev->dmaclk);
+	if (err) {
+		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
+			__func__, __LINE__);
+		goto err_dmaclk;
+	}
+	return err;
+err_dmaclk:
+	clk_disable_unprepare(dev->hclk);
+err_hclk:
+	clk_disable_unprepare(dev->aclk);
+err_aclk:
+	clk_disable_unprepare(dev->sclk);
+err_return:
+	return err;
+}
+
+static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
+{
+	clk_disable_unprepare(dev->dmaclk);
+	clk_disable_unprepare(dev->hclk);
+	clk_disable_unprepare(dev->aclk);
+	clk_disable_unprepare(dev->sclk);
+}
+
+static int check_alignment(struct scatterlist *sg_src,
+			   struct scatterlist *sg_dst,
+			   int align_mask)
+{
+	int in, out, align;
+
+	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
+	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
+	if (!sg_dst)
+		return in;
+	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
+	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
+	align = in && out;
+
+	return (align && (sg_src->length == sg_dst->length));
+}
+
+static int rk_load_data(struct rk_crypto_info *dev,
+			struct scatterlist *sg_src,
+			struct scatterlist *sg_dst)
+{
+	unsigned int count;
+
+	dev->aligned = dev->aligned ?
+		check_alignment(sg_src, sg_dst, dev->align_size) :
+		dev->aligned;
+	if (dev->aligned) {
+		count = min(dev->left_bytes, sg_src->length);
+		dev->left_bytes -= count;
+
+		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
+			dev_err(dev->dev, "[%s:%d] dma_map_sg(src)  error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		dev->addr_in = sg_dma_address(sg_src);
+
+		if (sg_dst) {
+			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
+				dev_err(dev->dev,
+					"[%s:%d] dma_map_sg(dst)  error\n",
+					__func__, __LINE__);
+				dma_unmap_sg(dev->dev, sg_src, 1,
+					     DMA_TO_DEVICE);
+				return -EINVAL;
+			}
+			dev->addr_out = sg_dma_address(sg_dst);
+		}
+	} else {
+		count = (dev->left_bytes > PAGE_SIZE) ?
+			PAGE_SIZE : dev->left_bytes;
+
+		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
+					dev->addr_vir, count,
+					dev->total - dev->left_bytes)) {
+			dev_err(dev->dev, "[%s:%d] pcopy err\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		dev->left_bytes -= count;
+		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
+		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
+			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp)  error\n",
+				__func__, __LINE__);
+			return -ENOMEM;
+		}
+		dev->addr_in = sg_dma_address(&dev->sg_tmp);
+
+		if (sg_dst) {
+			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
+					DMA_FROM_DEVICE)) {
+				dev_err(dev->dev,
+					"[%s:%d] dma_map_sg(sg_tmp)  error\n",
+					__func__, __LINE__);
+				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
+					     DMA_TO_DEVICE);
+				return -ENOMEM;
+			}
+			dev->addr_out = sg_dma_address(&dev->sg_tmp);
+		}
+	}
+	dev->count = count;
+	return 0;
+}
+
+static void rk_unload_data(struct rk_crypto_info *dev)
+{
+	struct scatterlist *sg_in, *sg_out;
+
+	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
+	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
+
+	if (dev->sg_dst) {
+		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
+		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
+	}
+}
+
+static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+{
+	struct rk_crypto_info *dev  = platform_get_drvdata(dev_id);
+	u32 interrupt_status;
+	int err = 0;
+
+	spin_lock(&dev->lock);
+	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+	if (interrupt_status & 0x0a) {
+		dev_warn(dev->dev, "DMA Error\n");
+		err = -EFAULT;
+	} else if (interrupt_status & 0x05) {
+		err = dev->update(dev);
+	}
+	if (err)
+		dev->complete(dev, err);
+	spin_unlock(&dev->lock);
+	return IRQ_HANDLED;
+}
+
+static void rk_crypto_tasklet_cb(unsigned long data)
+{
+	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+	struct crypto_async_request *async_req, *backlog;
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	backlog   = crypto_get_backlog(&dev->queue);
+	async_req = crypto_dequeue_request(&dev->queue);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	if (!async_req) {
+		dev_err(dev->dev, "async_req is NULL !!\n");
+		return;
+	}
+	if (backlog) {
+		backlog->complete(backlog, -EINPROGRESS);
+		backlog = NULL;
+	}
+
+	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
+		dev->ablk_req = ablkcipher_request_cast(async_req);
+	err = dev->start(dev);
+	if (err)
+		dev->complete(dev, err);
+}
+
+static struct rk_crypto_tmp *rk_cipher_algs[] = {
+	&rk_ecb_aes_alg,
+	&rk_cbc_aes_alg,
+	&rk_ecb_des_alg,
+	&rk_cbc_des_alg,
+	&rk_ecb_des3_ede_alg,
+	&rk_cbc_des3_ede_alg,
+};
+
+static int rk_crypto_register(struct rk_crypto_info *crypto_info)
+{
+	unsigned int i, k;
+	int err = 0;
+
+	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+		rk_cipher_algs[i]->dev = crypto_info;
+		err = crypto_register_alg(&rk_cipher_algs[i]->alg);
+		if (err)
+			goto err_cipher_algs;
+	}
+	return 0;
+
+err_cipher_algs:
+	for (k = 0; k < i; k++)
+		crypto_unregister_alg(&rk_cipher_algs[k]->alg);
+	return err;
+}
+
+static void rk_crypto_unregister(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
+		crypto_unregister_alg(&rk_cipher_algs[i]->alg);
+}
+
+static void rk_crypto_action(void *data)
+{
+	struct rk_crypto_info *crypto_info = data;
+
+	reset_control_assert(crypto_info->rst);
+}
+
+static const struct of_device_id crypto_of_id_table[] = {
+	{ .compatible = "rockchip,rk3288-crypto" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, crypto_of_id_table);
+
+static int rk_crypto_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct rk_crypto_info *crypto_info;
+	int err = 0;
+
+	crypto_info = devm_kzalloc(&pdev->dev,
+				   sizeof(*crypto_info), GFP_KERNEL);
+	if (!crypto_info) {
+		err = -ENOMEM;
+		goto err_crypto;
+	}
+
+	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
+	if (IS_ERR(crypto_info->rst)) {
+		err = PTR_ERR(crypto_info->rst);
+		goto err_crypto;
+	}
+
+	reset_control_assert(crypto_info->rst);
+	usleep_range(10, 20);
+	reset_control_deassert(crypto_info->rst);
+
+	err = devm_add_action(dev, rk_crypto_action, crypto_info);
+	if (err) {
+		reset_control_assert(crypto_info->rst);
+		goto err_crypto;
+	}
+
+	spin_lock_init(&crypto_info->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(crypto_info->reg)) {
+		err = PTR_ERR(crypto_info->reg);
+		goto err_crypto;
+	}
+
+	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
+	if (IS_ERR(crypto_info->aclk)) {
+		err = PTR_ERR(crypto_info->aclk);
+		goto err_crypto;
+	}
+
+	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(crypto_info->hclk)) {
+		err = PTR_ERR(crypto_info->hclk);
+		goto err_crypto;
+	}
+
+	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
+	if (IS_ERR(crypto_info->sclk)) {
+		err = PTR_ERR(crypto_info->sclk);
+		goto err_crypto;
+	}
+
+	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
+	if (IS_ERR(crypto_info->dmaclk)) {
+		err = PTR_ERR(crypto_info->dmaclk);
+		goto err_crypto;
+	}
+
+	crypto_info->irq = platform_get_irq(pdev, 0);
+	if (crypto_info->irq < 0) {
+		dev_warn(crypto_info->dev,
+			 "control Interrupt is not available.\n");
+		err = crypto_info->irq;
+		goto err_crypto;
+	}
+
+	err = devm_request_irq(&pdev->dev, crypto_info->irq,
+			       rk_crypto_irq_handle, IRQF_SHARED,
+			       "rk-crypto", pdev);
+
+	if (err) {
+		dev_err(crypto_info->dev, "irq request failed.\n");
+		goto err_crypto;
+	}
+
+	crypto_info->dev = &pdev->dev;
+	platform_set_drvdata(pdev, crypto_info);
+
+	tasklet_init(&crypto_info->crypto_tasklet,
+		     rk_crypto_tasklet_cb, (unsigned long)crypto_info);
+	crypto_init_queue(&crypto_info->queue, 50);
+
+	crypto_info->enable_clk = rk_crypto_enable_clk;
+	crypto_info->disable_clk = rk_crypto_disable_clk;
+	crypto_info->load_data = rk_load_data;
+	crypto_info->unload_data = rk_unload_data;
+
+	err = rk_crypto_register(crypto_info);
+	if (err) {
+		dev_err(dev, "err in register alg");
+		goto err_register_alg;
+	}
+
+	dev_info(dev, "Crypto Accelerator successfully registered\n");
+	return 0;
+
+err_register_alg:
+	tasklet_kill(&crypto_info->crypto_tasklet);
+err_crypto:
+	return err;
+}
+
+static int rk_crypto_remove(struct platform_device *pdev)
+{
+	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
+
+	rk_crypto_unregister();
+	tasklet_kill(&crypto_tmp->crypto_tasklet);
+	return 0;
+}
+
+static struct platform_driver crypto_driver = {
+	.probe		= rk_crypto_probe,
+	.remove		= rk_crypto_remove,
+	.driver		= {
+		.name	= "rk3288-crypto",
+		.of_match_table	= crypto_of_id_table,
+	},
+};
+
+module_platform_driver(crypto_driver);
+
+MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
+MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
+MODULE_LICENSE("GPL");
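rk_crypto_enable_clk() above relies on the usual goto-based unwind: when a later clk_prepare_enable() fails, every clock that was already enabled is released in reverse order. A minimal sketch of the same idiom with just two clocks follows; the function and parameter names are illustrative, not taken from the driver.

/* Sketch of the enable/unwind idiom used by rk_crypto_enable_clk();
 * names here are illustrative only. */
#include <linux/clk.h>

static int enable_bus_clocks(struct clk *aclk, struct clk *hclk)
{
	int err;

	err = clk_prepare_enable(aclk);
	if (err)
		return err;

	err = clk_prepare_enable(hclk);
	if (err)
		clk_disable_unprepare(aclk);	/* undo the clock that did succeed */

	return err;
}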

+ 216 - 0
drivers/crypto/rockchip/rk3288_crypto.h

@@ -0,0 +1,216 @@
+#ifndef __RK3288_CRYPTO_H__
+#define __RK3288_CRYPTO_H__
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/algapi.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#define _SBF(v, f)			((v) << (f))
+
+/* Crypto control registers*/
+#define RK_CRYPTO_INTSTS		0x0000
+#define RK_CRYPTO_PKA_DONE_INT		BIT(5)
+#define RK_CRYPTO_HASH_DONE_INT		BIT(4)
+#define RK_CRYPTO_HRDMA_ERR_INT		BIT(3)
+#define RK_CRYPTO_HRDMA_DONE_INT	BIT(2)
+#define RK_CRYPTO_BCDMA_ERR_INT		BIT(1)
+#define RK_CRYPTO_BCDMA_DONE_INT	BIT(0)
+
+#define RK_CRYPTO_INTENA		0x0004
+#define RK_CRYPTO_PKA_DONE_ENA		BIT(5)
+#define RK_CRYPTO_HASH_DONE_ENA		BIT(4)
+#define RK_CRYPTO_HRDMA_ERR_ENA		BIT(3)
+#define RK_CRYPTO_HRDMA_DONE_ENA	BIT(2)
+#define RK_CRYPTO_BCDMA_ERR_ENA		BIT(1)
+#define RK_CRYPTO_BCDMA_DONE_ENA	BIT(0)
+
+#define RK_CRYPTO_CTRL			0x0008
+#define RK_CRYPTO_WRITE_MASK		_SBF(0xFFFF, 16)
+#define RK_CRYPTO_TRNG_FLUSH		BIT(9)
+#define RK_CRYPTO_TRNG_START		BIT(8)
+#define RK_CRYPTO_PKA_FLUSH		BIT(7)
+#define RK_CRYPTO_HASH_FLUSH		BIT(6)
+#define RK_CRYPTO_BLOCK_FLUSH		BIT(5)
+#define RK_CRYPTO_PKA_START		BIT(4)
+#define RK_CRYPTO_HASH_START		BIT(3)
+#define RK_CRYPTO_BLOCK_START		BIT(2)
+#define RK_CRYPTO_TDES_START		BIT(1)
+#define RK_CRYPTO_AES_START		BIT(0)
+
+#define RK_CRYPTO_CONF			0x000c
+/* HASH Receive DMA Address Mode:   fix | increment */
+#define RK_CRYPTO_HR_ADDR_MODE		BIT(8)
+/* Block Transmit DMA Address Mode: fix | increment */
+#define RK_CRYPTO_BT_ADDR_MODE		BIT(7)
+/* Block Receive DMA Address Mode:  fix | increment */
+#define RK_CRYPTO_BR_ADDR_MODE		BIT(6)
+#define RK_CRYPTO_BYTESWAP_HRFIFO	BIT(5)
+#define RK_CRYPTO_BYTESWAP_BTFIFO	BIT(4)
+#define RK_CRYPTO_BYTESWAP_BRFIFO	BIT(3)
+/* AES = 0 OR DES = 1 */
+#define RK_CRYPTO_DESSEL				BIT(2)
+#define RK_CYYPTO_HASHINSEL_INDEPENDENT_SOURCE		_SBF(0x00, 0)
+#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_INPUT		_SBF(0x01, 0)
+#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_OUTPUT		_SBF(0x02, 0)
+
+/* Block Receiving DMA Start Address Register */
+#define RK_CRYPTO_BRDMAS		0x0010
+/* Block Transmitting DMA Start Address Register */
+#define RK_CRYPTO_BTDMAS		0x0014
+/* Block Receiving DMA Length Register */
+#define RK_CRYPTO_BRDMAL		0x0018
+/* Hash Receiving DMA Start Address Register */
+#define RK_CRYPTO_HRDMAS		0x001c
+/* Hash Receiving DMA Length Register */
+#define RK_CRYPTO_HRDMAL		0x0020
+
+/* AES registers */
+#define RK_CRYPTO_AES_CTRL			  0x0080
+#define RK_CRYPTO_AES_BYTESWAP_CNT	BIT(11)
+#define RK_CRYPTO_AES_BYTESWAP_KEY	BIT(10)
+#define RK_CRYPTO_AES_BYTESWAP_IV	BIT(9)
+#define RK_CRYPTO_AES_BYTESWAP_DO	BIT(8)
+#define RK_CRYPTO_AES_BYTESWAP_DI	BIT(7)
+#define RK_CRYPTO_AES_KEY_CHANGE	BIT(6)
+#define RK_CRYPTO_AES_ECB_MODE		_SBF(0x00, 4)
+#define RK_CRYPTO_AES_CBC_MODE		_SBF(0x01, 4)
+#define RK_CRYPTO_AES_CTR_MODE		_SBF(0x02, 4)
+#define RK_CRYPTO_AES_128BIT_key	_SBF(0x00, 2)
+#define RK_CRYPTO_AES_192BIT_key	_SBF(0x01, 2)
+#define RK_CRYPTO_AES_256BIT_key	_SBF(0x02, 2)
+/* Slave = 0 / fifo = 1 */
+#define RK_CRYPTO_AES_FIFO_MODE		BIT(1)
+/* Encryption = 0 , Decryption = 1 */
+#define RK_CRYPTO_AES_DEC		BIT(0)
+
+#define RK_CRYPTO_AES_STS		0x0084
+#define RK_CRYPTO_AES_DONE		BIT(0)
+
+/* AES Input Data 0-3 Register */
+#define RK_CRYPTO_AES_DIN_0		0x0088
+#define RK_CRYPTO_AES_DIN_1		0x008c
+#define RK_CRYPTO_AES_DIN_2		0x0090
+#define RK_CRYPTO_AES_DIN_3		0x0094
+
+/* AES output Data 0-3 Register */
+#define RK_CRYPTO_AES_DOUT_0		0x0098
+#define RK_CRYPTO_AES_DOUT_1		0x009c
+#define RK_CRYPTO_AES_DOUT_2		0x00a0
+#define RK_CRYPTO_AES_DOUT_3		0x00a4
+
+/* AES IV Data 0-3 Register */
+#define RK_CRYPTO_AES_IV_0		0x00a8
+#define RK_CRYPTO_AES_IV_1		0x00ac
+#define RK_CRYPTO_AES_IV_2		0x00b0
+#define RK_CRYPTO_AES_IV_3		0x00b4
+
+/* AES Key Data 0-3 Register */
+#define RK_CRYPTO_AES_KEY_0		0x00b8
+#define RK_CRYPTO_AES_KEY_1		0x00bc
+#define RK_CRYPTO_AES_KEY_2		0x00c0
+#define RK_CRYPTO_AES_KEY_3		0x00c4
+#define RK_CRYPTO_AES_KEY_4		0x00c8
+#define RK_CRYPTO_AES_KEY_5		0x00cc
+#define RK_CRYPTO_AES_KEY_6		0x00d0
+#define RK_CRYPTO_AES_KEY_7		0x00d4
+
+/* des/tdes */
+#define RK_CRYPTO_TDES_CTRL		0x0100
+#define RK_CRYPTO_TDES_BYTESWAP_KEY	BIT(8)
+#define RK_CRYPTO_TDES_BYTESWAP_IV	BIT(7)
+#define RK_CRYPTO_TDES_BYTESWAP_DO	BIT(6)
+#define RK_CRYPTO_TDES_BYTESWAP_DI	BIT(5)
+/* 0: ECB, 1: CBC */
+#define RK_CRYPTO_TDES_CHAINMODE_CBC	BIT(4)
+/* TDES Key Mode, 0 : EDE, 1 : EEE */
+#define RK_CRYPTO_TDES_EEE		BIT(3)
+/* 0: DES, 1:TDES */
+#define RK_CRYPTO_TDES_SELECT		BIT(2)
+/* 0: Slave, 1:Fifo */
+#define RK_CRYPTO_TDES_FIFO_MODE	BIT(1)
+/* Encryption = 0 , Decryption = 1 */
+#define RK_CRYPTO_TDES_DEC		BIT(0)
+
+#define RK_CRYPTO_TDES_STS		0x0104
+#define RK_CRYPTO_TDES_DONE		BIT(0)
+
+#define RK_CRYPTO_TDES_DIN_0		0x0108
+#define RK_CRYPTO_TDES_DIN_1		0x010c
+#define RK_CRYPTO_TDES_DOUT_0		0x0110
+#define RK_CRYPTO_TDES_DOUT_1		0x0114
+#define RK_CRYPTO_TDES_IV_0		0x0118
+#define RK_CRYPTO_TDES_IV_1		0x011c
+#define RK_CRYPTO_TDES_KEY1_0		0x0120
+#define RK_CRYPTO_TDES_KEY1_1		0x0124
+#define RK_CRYPTO_TDES_KEY2_0		0x0128
+#define RK_CRYPTO_TDES_KEY2_1		0x012c
+#define RK_CRYPTO_TDES_KEY3_0		0x0130
+#define RK_CRYPTO_TDES_KEY3_1		0x0134
+
+#define CRYPTO_READ(dev, offset)		  \
+		readl_relaxed(((dev)->reg + (offset)))
+#define CRYPTO_WRITE(dev, offset, val)	  \
+		writel_relaxed((val), ((dev)->reg + (offset)))
+
+struct rk_crypto_info {
+	struct device			*dev;
+	struct clk			*aclk;
+	struct clk			*hclk;
+	struct clk			*sclk;
+	struct clk			*dmaclk;
+	struct reset_control		*rst;
+	void __iomem			*reg;
+	int				irq;
+	struct crypto_queue		queue;
+	struct tasklet_struct		crypto_tasklet;
+	struct ablkcipher_request	*ablk_req;
+	/* device lock */
+	spinlock_t			lock;
+
+	/* the public variable */
+	struct scatterlist		*sg_src;
+	struct scatterlist		*sg_dst;
+	struct scatterlist		sg_tmp;
+	struct scatterlist		*first;
+	unsigned int			left_bytes;
+	void				*addr_vir;
+	int				aligned;
+	int				align_size;
+	size_t				nents;
+	unsigned int			total;
+	unsigned int			count;
+	u32				mode;
+	dma_addr_t			addr_in;
+	dma_addr_t			addr_out;
+	int (*start)(struct rk_crypto_info *dev);
+	int (*update)(struct rk_crypto_info *dev);
+	void (*complete)(struct rk_crypto_info *dev, int err);
+	int (*enable_clk)(struct rk_crypto_info *dev);
+	void (*disable_clk)(struct rk_crypto_info *dev);
+	int (*load_data)(struct rk_crypto_info *dev,
+			 struct scatterlist *sg_src,
+			 struct scatterlist *sg_dst);
+	void (*unload_data)(struct rk_crypto_info *dev);
+};
+
+/* the private variable of cipher */
+struct rk_cipher_ctx {
+	struct rk_crypto_info		*dev;
+	unsigned int			keylen;
+};
+
+struct rk_crypto_tmp {
+	struct rk_crypto_info *dev;
+	struct crypto_alg alg;
+};
+
+extern struct rk_crypto_tmp rk_ecb_aes_alg;
+extern struct rk_crypto_tmp rk_cbc_aes_alg;
+extern struct rk_crypto_tmp rk_ecb_des_alg;
+extern struct rk_crypto_tmp rk_cbc_des_alg;
+extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
+extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
+
+#endif

+ 505 - 0
drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c

@@ -0,0 +1,505 @@
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+#define RK_CRYPTO_DEC			BIT(0)
+
+static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
+{
+	if (dev->ablk_req->base.complete)
+		dev->ablk_req->base.complete(&dev->ablk_req->base, err);
+}
+
+static int rk_handle_req(struct rk_crypto_info *dev,
+			 struct ablkcipher_request *req)
+{
+	unsigned long flags;
+	int err;
+
+	if (!IS_ALIGNED(req->nbytes, dev->align_size))
+		return -EINVAL;
+
+	dev->left_bytes = req->nbytes;
+	dev->total = req->nbytes;
+	dev->sg_src = req->src;
+	dev->first = req->src;
+	dev->nents = sg_nents(req->src);
+	dev->sg_dst = req->dst;
+	dev->aligned = 1;
+	dev->ablk_req = req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	err = ablkcipher_enqueue_request(&dev->queue, req);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	tasklet_schedule(&dev->crypto_tasklet);
+	return err;
+}
+
+static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
+			 const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->keylen = keylen;
+	memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
+	return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
+			  const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+
+	if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (keylen == DES_KEY_SIZE) {
+		if (!des_ekey(tmp, key) &&
+		    (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			return -EINVAL;
+		}
+	}
+
+	ctx->keylen = keylen;
+	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+	return 0;
+}
+
+static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_AES_ECB_MODE;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_AES_CBC_MODE;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = 0;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_TDES_SELECT;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+		    RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+{
+	struct crypto_ablkcipher *cipher =
+		crypto_ablkcipher_reqtfm(dev->ablk_req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 ivsize, block, conf_reg = 0;
+
+	block = crypto_tfm_alg_blocksize(tfm);
+	ivsize = crypto_ablkcipher_ivsize(cipher);
+
+	if (block == DES_BLOCK_SIZE) {
+		dev->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+			     RK_CRYPTO_TDES_BYTESWAP_KEY |
+			     RK_CRYPTO_TDES_BYTESWAP_IV;
+		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode);
+		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0,
+			    dev->ablk_req->info, ivsize);
+		conf_reg = RK_CRYPTO_DESSEL;
+	} else {
+		dev->mode |= RK_CRYPTO_AES_FIFO_MODE |
+			     RK_CRYPTO_AES_KEY_CHANGE |
+			     RK_CRYPTO_AES_BYTESWAP_KEY |
+			     RK_CRYPTO_AES_BYTESWAP_IV;
+		if (ctx->keylen == AES_KEYSIZE_192)
+			dev->mode |= RK_CRYPTO_AES_192BIT_key;
+		else if (ctx->keylen == AES_KEYSIZE_256)
+			dev->mode |= RK_CRYPTO_AES_256BIT_key;
+		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode);
+		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0,
+			    dev->ablk_req->info, ivsize);
+	}
+	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
+		    RK_CRYPTO_BYTESWAP_BRFIFO;
+	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
+	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
+		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
+}
+
+static void crypto_dma_start(struct rk_crypto_info *dev)
+{
+	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
+	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
+	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
+	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
+		     _SBF(RK_CRYPTO_BLOCK_START, 16));
+}
+
+static int rk_set_data_start(struct rk_crypto_info *dev)
+{
+	int err;
+
+	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+	if (!err)
+		crypto_dma_start(dev);
+	return err;
+}
+
+static int rk_ablk_start(struct rk_crypto_info *dev)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	rk_ablk_hw_init(dev);
+	err = rk_set_data_start(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return err;
+}
+
+static void rk_iv_copyback(struct rk_crypto_info *dev)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req);
+	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
+
+	if (ivsize == DES_BLOCK_SIZE)
+		memcpy_fromio(dev->ablk_req->info,
+			      dev->reg + RK_CRYPTO_TDES_IV_0, ivsize);
+	else if (ivsize == AES_BLOCK_SIZE)
+		memcpy_fromio(dev->ablk_req->info,
+			      dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+}
+
+/* return:
+ *	true	an error occurred
+ *	false	no error, continue
+ */
+static int rk_ablk_rx(struct rk_crypto_info *dev)
+{
+	int err = 0;
+
+	dev->unload_data(dev);
+	if (!dev->aligned) {
+		if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents,
+					  dev->addr_vir, dev->count,
+					  dev->total - dev->left_bytes -
+					  dev->count)) {
+			err = -EINVAL;
+			goto out_rx;
+		}
+	}
+	if (dev->left_bytes) {
+		if (dev->aligned) {
+			if (sg_is_last(dev->sg_src)) {
+				dev_err(dev->dev, "[%s:%d] Lack of data\n",
+					__func__, __LINE__);
+				err = -ENOMEM;
+				goto out_rx;
+			}
+			dev->sg_src = sg_next(dev->sg_src);
+			dev->sg_dst = sg_next(dev->sg_dst);
+		}
+		err = rk_set_data_start(dev);
+	} else {
+		rk_iv_copyback(dev);
+		/* here show the calculation is over without any err */
+		dev->complete(dev, 0);
+	}
+out_rx:
+	return err;
+}
+
+static int rk_ablk_cra_init(struct crypto_tfm *tfm)
+{
+	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct rk_crypto_tmp *algt;
+
+	algt = container_of(alg, struct rk_crypto_tmp, alg);
+
+	ctx->dev = algt->dev;
+	ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
+	ctx->dev->start = rk_ablk_start;
+	ctx->dev->update = rk_ablk_rx;
+	ctx->dev->complete = rk_crypto_complete;
+	ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+
+	return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
+}
+
+static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
+{
+	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	free_page((unsigned long)ctx->dev->addr_vir);
+	ctx->dev->disable_clk(ctx->dev);
+}
+
+struct rk_crypto_tmp rk_ecb_aes_alg = {
+	.alg = {
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "ecb-aes-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x0f,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= rk_aes_setkey,
+			.encrypt	= rk_aes_ecb_encrypt,
+			.decrypt	= rk_aes_ecb_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_cbc_aes_alg = {
+	.alg = {
+		.cra_name		= "cbc(aes)",
+		.cra_driver_name	= "cbc-aes-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x0f,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey		= rk_aes_setkey,
+			.encrypt	= rk_aes_cbc_encrypt,
+			.decrypt	= rk_aes_cbc_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_ecb_des_alg = {
+	.alg = {
+		.cra_name		= "ecb(des)",
+		.cra_driver_name	= "ecb-des-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= DES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x07,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= rk_tdes_setkey,
+			.encrypt	= rk_des_ecb_encrypt,
+			.decrypt	= rk_des_ecb_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_cbc_des_alg = {
+	.alg = {
+		.cra_name		= "cbc(des)",
+		.cra_driver_name	= "cbc-des-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= DES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x07,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
+			.setkey		= rk_tdes_setkey,
+			.encrypt	= rk_des_cbc_encrypt,
+			.decrypt	= rk_des_cbc_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
+	.alg = {
+		.cra_name		= "ecb(des3_ede)",
+		.cra_driver_name	= "ecb-des3-ede-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= DES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x07,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
+			.setkey		= rk_tdes_setkey,
+			.encrypt	= rk_des3_ede_ecb_encrypt,
+			.decrypt	= rk_des3_ede_ecb_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
+	.alg = {
+		.cra_name		= "cbc(des3_ede)",
+		.cra_driver_name	= "cbc-des3-ede-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= DES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x07,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
+			.setkey		= rk_tdes_setkey,
+			.encrypt	= rk_des3_ede_cbc_encrypt,
+			.decrypt	= rk_des3_ede_cbc_decrypt,
+		}
+	}
+};

+ 27 - 15
drivers/crypto/sahara.c

@@ -130,18 +130,18 @@
 #define SAHARA_REG_IDAR		0x20
 
 struct sahara_hw_desc {
-	u32		hdr;
-	u32		len1;
-	dma_addr_t	p1;
-	u32		len2;
-	dma_addr_t	p2;
-	dma_addr_t	next;
+	u32	hdr;
+	u32	len1;
+	u32	p1;
+	u32	len2;
+	u32	p2;
+	u32	next;
 };
 
 struct sahara_hw_link {
-	u32		len;
-	dma_addr_t	p;
-	dma_addr_t	next;
+	u32	len;
+	u32	p;
+	u32	next;
 };
 
 struct sahara_ctx {
@@ -228,9 +228,9 @@ struct sahara_dev {
 
 	size_t			total;
 	struct scatterlist	*in_sg;
-	unsigned int		nb_in_sg;
+	int		nb_in_sg;
 	struct scatterlist	*out_sg;
-	unsigned int		nb_out_sg;
+	int		nb_out_sg;
 
 	u32			error;
 };
@@ -416,8 +416,8 @@ static void sahara_dump_descriptors(struct sahara_dev *dev)
 		return;
 
 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
-		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
-			i, dev->hw_phys_desc[i]);
+		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
+			i, &dev->hw_phys_desc[i]);
 		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
 		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
 		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
@@ -437,8 +437,8 @@ static void sahara_dump_links(struct sahara_dev *dev)
 		return;
 
 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
-		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
-			i, dev->hw_phys_link[i]);
+		dev_dbg(dev->device, "Link (%d) (%pad):\n",
+			i, &dev->hw_phys_link[i]);
 		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
 		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
 		dev_dbg(dev->device, "\tnext = 0x%08x\n",
@@ -477,7 +477,15 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 	}
 
 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
+	if (dev->nb_in_sg < 0) {
+		dev_err(dev->device, "Invalid numbers of src SG.\n");
+		return dev->nb_in_sg;
+	}
 	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
+	if (dev->nb_out_sg < 0) {
+		dev_err(dev->device, "Invalid numbers of dst SG.\n");
+		return dev->nb_out_sg;
+	}
 	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
 		dev_err(dev->device, "not enough hw links (%d)\n",
 			dev->nb_in_sg + dev->nb_out_sg);
@@ -793,6 +801,10 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
 	dev->in_sg = rctx->in_sg;
 
 	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
+	if (dev->nb_in_sg < 0) {
+		dev_err(dev->device, "Invalid numbers of src SG.\n");
+		return dev->nb_in_sg;
+	}
 	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
 		dev_err(dev->device, "not enough hw links (%d)\n",
 			dev->nb_in_sg + dev->nb_out_sg);
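The sahara debug hunks above also stop casting DMA handles to 0x%08x and print them with the dedicated %pad specifier, which takes a pointer to the dma_addr_t and emits it at its real width. A short sketch of that usage follows; dump_desc_addr() is an illustrative helper, not driver code.

/* Sketch of the %pad usage adopted for the sahara debug output. */
#include <linux/device.h>
#include <linux/types.h>

static void dump_desc_addr(struct device *dev, int i, dma_addr_t pa)
{
	/* %pad dereferences &pa and prints the full-width dma_addr_t */
	dev_dbg(dev, "Descriptor (%d) (%pad):\n", i, &pa);
}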

+ 2 - 0
drivers/crypto/sunxi-ss/sun4i-ss-core.c

@@ -39,6 +39,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 		.import = sun4i_hash_import_md5,
 		.halg = {
 			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct md5_state),
 			.base = {
 				.cra_name = "md5",
 				.cra_driver_name = "md5-sun4i-ss",
@@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 		.import = sun4i_hash_import_sha1,
 		.halg = {
 			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct sha1_state),
 			.base = {
 				.cra_name = "sha1",
 				.cra_driver_name = "sha1-sun4i-ss",

+ 117 - 7
drivers/crypto/talitos.c

@@ -1216,6 +1216,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
+	void *err;
 
 	if (cryptlen + authsize > max_len) {
 		dev_err(dev, "length exceeds h/w max limit\n");
@@ -1228,14 +1229,29 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	if (!dst || dst == src) {
 		src_nents = sg_nents_for_len(src,
 					     assoclen + cryptlen + authsize);
+		if (src_nents < 0) {
+			dev_err(dev, "Invalid number of src SG.\n");
+			err = ERR_PTR(-EINVAL);
+			goto error_sg;
+		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_nents = dst ? src_nents : 0;
 	} else { /* dst && dst != src*/
 		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
 						 (encrypt ? 0 : authsize));
+		if (src_nents < 0) {
+			dev_err(dev, "Invalid number of src SG.\n");
+			err = ERR_PTR(-EINVAL);
+			goto error_sg;
+		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
 						 (encrypt ? authsize : 0));
+		if (dst_nents < 0) {
+			dev_err(dev, "Invalid number of dst SG.\n");
+			err = ERR_PTR(-EINVAL);
+			goto error_sg;
+		}
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
 
@@ -1260,11 +1276,9 @@
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
 	if (!edesc) {
-		if (iv_dma)
-			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
-
 		dev_err(dev, "could not allocate edescriptor\n");
 		dev_err(dev, "could not allocate edescriptor\n");
-		return ERR_PTR(-ENOMEM);
+		err = ERR_PTR(-ENOMEM);
+		goto error_sg;
 	}
 
 	edesc->src_nents = src_nents;
@@ -1277,6 +1291,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 						     DMA_BIDIRECTIONAL);
 
 	return edesc;
+error_sg:
+	if (iv_dma)
+		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+	return err;
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
@@ -1830,11 +1848,16 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 	unsigned int nbytes_to_hash;
 	unsigned int to_hash_later;
 	unsigned int nsg;
+	int nents;
 
 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
 		/* Buffer up to one whole block */
-		sg_copy_to_buffer(areq->src,
-				  sg_nents_for_len(areq->src, nbytes),
+		nents = sg_nents_for_len(areq->src, nbytes);
+		if (nents < 0) {
+			dev_err(ctx->dev, "Invalid number of src SG.\n");
+			return nents;
+		}
+		sg_copy_to_buffer(areq->src, nents,
 				  req_ctx->buf + req_ctx->nbuf, nbytes);
 		req_ctx->nbuf += nbytes;
 		return 0;
@@ -1867,7 +1890,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 		req_ctx->psrc = areq->src;
 
 	if (to_hash_later) {
-		int nents = sg_nents_for_len(areq->src, nbytes);
+		nents = sg_nents_for_len(areq->src, nbytes);
+		if (nents < 0) {
+			dev_err(ctx->dev, "Invalid number of src SG.\n");
+			return nents;
+		}
 		sg_pcopy_to_buffer(areq->src, nents,
 				      req_ctx->bufnext,
 				      to_hash_later,
@@ -2295,6 +2322,22 @@ static struct talitos_alg_template driver_algs[] = {
 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
 	},
 	/* ABLKCIPHER algorithms. */
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "ecb-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU,
+	},
 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.alg.crypto = {
 			.cra_name = "cbc(aes)",
@@ -2312,6 +2355,73 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_SEL0_AESU |
 				     DESC_HDR_MODE0_AESU_CBC,
 	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ctr(aes)",
+			.cra_driver_name = "ctr-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CTR,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "ecb-des-talitos",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = DES_KEY_SIZE,
+				.max_keysize = DES_KEY_SIZE,
+				.ivsize = DES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "cbc-des-talitos",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = DES_KEY_SIZE,
+				.max_keysize = DES_KEY_SIZE,
+				.ivsize = DES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_CBC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "ecb-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = DES3_EDE_KEY_SIZE,
+				.max_keysize = DES3_EDE_KEY_SIZE,
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_3DES,
+	},
 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.alg.crypto = {
 			.cra_name = "cbc(des3_ede)",

Some files were not shown because too many files changed in this diff