浏览代码

Merge tag 'staging-4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging

Pull staging/IIO updates from Greg KH:
 "Here is the big Staging and IIO driver patches for 4.16-rc1.

  There is the normal amount of new IIO drivers added, like all
  releases.

  The networking IPX and the ncpfs filesystem are moved into the staging
  tree, as they are on their way out of the kernel due to lack of use
  anymore.

  The visorbus subsystem finally has started moving out of the staging
  tree to the "real" part of the kernel, and the most and fsl-mc
  codebases are almost ready to move out, that will probably happen for
  4.17-rc1 if all goes well.

  Other than that, there is a bunch of license header cleanups in the
  tree, along with the normal amount of coding style churn that we all
  know and love for this codebase. I also got frustrated at the
  Meltdown/Spectre mess and took it out on the dgnc tty driver, deleting
  huge chunks of it that were never even being used.

  Full details of everything is in the shortlog.

  All of these patches have been in linux-next for a while with no
  reported issues"

* tag 'staging-4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (627 commits)
  staging: rtlwifi: remove redundant initialization of 'cfg_cmd'
  staging: rtl8723bs: remove a couple of redundant initializations
  staging: comedi: reformat lines to 80 chars or less
  staging: lustre: separate a connection destroy from free struct kib_conn
  Staging: rtl8723bs: Use !x instead of NULL comparison
  Staging: rtl8723bs: Remove dead code
  Staging: rtl8723bs: Change names to conform to the kernel code
  staging: ccree: Fix missing blank line after declaration
  staging: rtl8188eu: remove redundant initialization of 'pwrcfgcmd'
  staging: rtlwifi: remove unused RTLHALMAC_ST and RTLPHYDM_ST
  staging: fbtft: remove unused FB_TFT_SSD1325 kconfig
  staging: comedi: dt2811: remove redundant initialization of 'ns'
  staging: wilc1000: fix alignments to match open parenthesis
  staging: wilc1000: removed unnecessary defined enums typedef
  staging: wilc1000: remove unnecessary use of parentheses
  staging: rtl8192u: remove redundant initialization of 'timeout'
  staging: sm750fb: fix CamelCase for dispSet var
  staging: lustre: lnet/selftest: fix compile error on UP build
  staging: rtl8723bs: hal_com_phycfg: Remove unneeded semicolons
  staging: rts5208: Fix "seg_no" calculation in reset_ms_card()
  ...
Linus Torvalds 7 年之前
父节点
当前提交
5d8515bc23
共有 100 个文件被更改,包括 4840 次插入2966 次删除
  1. 12 2
      Documentation/ABI/testing/sysfs-bus-iio
  2. 0 1
      Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
  3. 3 1
      Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt
  4. 7 0
      Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
  5. 24 0
      Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
  6. 6 2
      Documentation/devicetree/bindings/iio/health/max30102.txt
  7. 23 0
      Documentation/devicetree/bindings/iio/light/uvis25.txt
  8. 13 6
      MAINTAINERS
  9. 2 0
      drivers/Kconfig
  10. 1 0
      drivers/Makefile
  11. 12 0
      drivers/iio/accel/bmc150-accel-i2c.c
  12. 29 2
      drivers/iio/accel/da280.c
  13. 3 0
      drivers/iio/accel/kxsd9-i2c.c
  14. 9 11
      drivers/iio/accel/mma8452.c
  15. 0 5
      drivers/iio/accel/st_accel_core.c
  16. 3 0
      drivers/iio/adc/Kconfig
  17. 19 6
      drivers/iio/adc/aspeed_adc.c
  18. 436 20
      drivers/iio/adc/at91-sama5d2_adc.c
  19. 2 2
      drivers/iio/adc/at91_adc.c
  20. 6 14
      drivers/iio/adc/axp288_adc.c
  21. 104 30
      drivers/iio/adc/hx711.c
  22. 226 91
      drivers/iio/adc/ina2xx-adc.c
  23. 36 24
      drivers/iio/adc/meson_saradc.c
  24. 4 0
      drivers/iio/adc/qcom-vadc-common.c
  25. 1 13
      drivers/iio/adc/stm32-adc-core.c
  26. 1 13
      drivers/iio/adc/stm32-adc-core.h
  27. 116 83
      drivers/iio/adc/stm32-adc.c
  28. 1 1
      drivers/iio/adc/ti_am335x_adc.c
  29. 3 10
      drivers/iio/chemical/ccs811.c
  30. 0 2
      drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
  31. 1 1
      drivers/iio/common/ssp_sensors/ssp.h
  32. 1 1
      drivers/iio/common/ssp_sensors/ssp_dev.c
  33. 1 4
      drivers/iio/common/ssp_sensors/ssp_spi.c
  34. 1 1
      drivers/iio/counter/stm32-lptimer-cnt.c
  35. 1 1
      drivers/iio/dac/mcp4725.c
  36. 1 13
      drivers/iio/dac/stm32-dac-core.c
  37. 1 14
      drivers/iio/dac/stm32-dac-core.h
  38. 1 14
      drivers/iio/dac/stm32-dac.c
  39. 1 1
      drivers/iio/dummy/iio_dummy_evgen.c
  40. 9 6
      drivers/iio/gyro/adis16136.c
  41. 0 1
      drivers/iio/gyro/bmg160_core.c
  42. 234 74
      drivers/iio/health/max30102.c
  43. 2 1
      drivers/iio/humidity/hts221.h
  44. 16 2
      drivers/iio/humidity/hts221_core.c
  45. 2 16
      drivers/iio/humidity/hts221_i2c.c
  46. 2 16
      drivers/iio/humidity/hts221_spi.c
  47. 16 12
      drivers/iio/imu/adis16480.c
  48. 1 2
      drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
  49. 2 0
      drivers/iio/imu/st_lsm6dsx/Kconfig
  50. 12 27
      drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
  51. 69 38
      drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
  52. 51 64
      drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
  53. 15 40
      drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
  54. 14 56
      drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
  55. 15 0
      drivers/iio/industrialio-buffer.c
  56. 1 0
      drivers/iio/industrialio-core.c
  57. 34 0
      drivers/iio/light/Kconfig
  58. 4 0
      drivers/iio/light/Makefile
  59. 0 2
      drivers/iio/light/cros_ec_light_prox.c
  60. 37 0
      drivers/iio/light/st_uvis25.h
  61. 359 0
      drivers/iio/light/st_uvis25_core.c
  62. 69 0
      drivers/iio/light/st_uvis25_i2c.c
  63. 68 0
      drivers/iio/light/st_uvis25_spi.c
  64. 568 0
      drivers/iio/light/zopt2201.c
  65. 1 0
      drivers/iio/magnetometer/ak8975.c
  66. 132 71
      drivers/iio/pressure/bmp280-core.c
  67. 1 0
      drivers/iio/proximity/sx9500.c
  68. 1 2
      drivers/iio/trigger/stm32-lptimer-trigger.c
  69. 1 1
      drivers/iio/trigger/stm32-timer-trigger.c
  70. 4 0
      drivers/staging/Kconfig
  71. 2 0
      drivers/staging/Makefile
  72. 27 10
      drivers/staging/android/ashmem.c
  73. 1 0
      drivers/staging/android/ashmem.h
  74. 4 12
      drivers/staging/android/ion/ion-ioctl.c
  75. 17 23
      drivers/staging/android/ion/ion.c
  76. 4 12
      drivers/staging/android/ion/ion.h
  77. 1 10
      drivers/staging/android/ion/ion_carveout_heap.c
  78. 1 10
      drivers/staging/android/ion/ion_chunk_heap.c
  79. 1 10
      drivers/staging/android/ion/ion_cma_heap.c
  80. 4 12
      drivers/staging/android/ion/ion_heap.c
  81. 1 10
      drivers/staging/android/ion/ion_page_pool.c
  82. 2 11
      drivers/staging/android/ion/ion_system_heap.c
  83. 1 0
      drivers/staging/android/uapi/ashmem.h
  84. 1 10
      drivers/staging/android/uapi/ion.h
  85. 0 27
      drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
  86. 2 0
      drivers/staging/ccree/Kconfig
  87. 6 2
      drivers/staging/ccree/Makefile
  88. 1 21
      drivers/staging/ccree/TODO
  89. 513 606
      drivers/staging/ccree/cc_aead.c
  90. 24 32
      drivers/staging/ccree/cc_aead.h
  91. 480 605
      drivers/staging/ccree/cc_buffer_mgr.c
  92. 74 0
      drivers/staging/ccree/cc_buffer_mgr.h
  93. 261 454
      drivers/staging/ccree/cc_cipher.c
  94. 74 0
      drivers/staging/ccree/cc_cipher.h
  95. 4 34
      drivers/staging/ccree/cc_crypto_ctx.h
  96. 101 0
      drivers/staging/ccree/cc_debugfs.c
  97. 32 0
      drivers/staging/ccree/cc_debugfs.h
  98. 141 205
      drivers/staging/ccree/cc_driver.c
  99. 194 0
      drivers/staging/ccree/cc_driver.h
  100. 15 30
      drivers/staging/ccree/cc_fips.c

+ 12 - 2
Documentation/ABI/testing/sysfs-bus-iio

@@ -32,7 +32,7 @@ Description:
 		Description of the physical chip / device for device X.
 		Description of the physical chip / device for device X.
 		Typically a part number.
 		Typically a part number.
 
 
-What:		/sys/bus/iio/devices/iio:deviceX/timestamp_clock
+What:		/sys/bus/iio/devices/iio:deviceX/current_timestamp_clock
 KernelVersion:	4.5
 KernelVersion:	4.5
 Contact:	linux-iio@vger.kernel.org
 Contact:	linux-iio@vger.kernel.org
 Description:
 Description:
@@ -1290,7 +1290,7 @@ KernelVersion:	3.4
 Contact:	linux-iio@vger.kernel.org
 Contact:	linux-iio@vger.kernel.org
 Description:
 Description:
 		Unit-less light intensity. Modifiers both and ir indicate
 		Unit-less light intensity. Modifiers both and ir indicate
-		that measurements contains visible and infrared light
+		that measurements contain visible and infrared light
 		components or just infrared light, respectively. Modifier uv indicates
 		components or just infrared light, respectively. Modifier uv indicates
 		that measurements contain ultraviolet light components.
 		that measurements contain ultraviolet light components.
 
 
@@ -1413,6 +1413,16 @@ Description:
 		the available samples after the timeout expires and thus have a
 		the available samples after the timeout expires and thus have a
 		maximum delay guarantee.
 		maximum delay guarantee.
 
 
+What:		/sys/bus/iio/devices/iio:deviceX/buffer/data_available
+KernelVersion: 4.16
+Contact:	linux-iio@vger.kernel.org
+Description:
+		A read-only value indicating the bytes of data available in the
+		buffer. In the case of an output buffer, this indicates the
+		amount of empty space available to write data to. In the case of
+		an input buffer, this indicates the amount of data available for
+		reading.
+
 What:		/sys/bus/iio/devices/iio:deviceX/buffer/hwfifo_enabled
 What:		/sys/bus/iio/devices/iio:deviceX/buffer/hwfifo_enabled
 KernelVersion: 4.2
 KernelVersion: 4.2
 Contact:	linux-iio@vger.kernel.org
 Contact:	linux-iio@vger.kernel.org

+ 0 - 1
Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt

@@ -15,7 +15,6 @@ Required properties:
 			- "clkin" for the reference clock (typically XTAL)
 			- "clkin" for the reference clock (typically XTAL)
 			- "core" for the SAR ADC core clock
 			- "core" for the SAR ADC core clock
 		optional clocks:
 		optional clocks:
-			- "sana" for the analog clock
 			- "adc_clk" for the ADC (sampling) clock
 			- "adc_clk" for the ADC (sampling) clock
 			- "adc_sel" for the ADC (sampling) clock mux
 			- "adc_sel" for the ADC (sampling) clock mux
 - vref-supply:	the regulator supply for the ADC reference voltage
 - vref-supply:	the regulator supply for the ADC reference voltage

+ 3 - 1
Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt

@@ -8,6 +8,7 @@ Required properties:
 - reg: memory window mapping address and length
 - reg: memory window mapping address and length
 - clocks: Input clock used to derive the sample clock. Expected to be the
 - clocks: Input clock used to derive the sample clock. Expected to be the
           SoC's APB clock.
           SoC's APB clock.
+- resets: Reset controller phandle
 - #io-channel-cells: Must be set to <1> to indicate channels are selected
 - #io-channel-cells: Must be set to <1> to indicate channels are selected
                      by index.
                      by index.
 
 
@@ -15,6 +16,7 @@ Example:
 	adc@1e6e9000 {
 	adc@1e6e9000 {
 		compatible = "aspeed,ast2400-adc";
 		compatible = "aspeed,ast2400-adc";
 		reg = <0x1e6e9000 0xb0>;
 		reg = <0x1e6e9000 0xb0>;
-		clocks = <&clk_apb>;
+		clocks = <&syscon ASPEED_CLK_APB>;
+		resets = <&syscon ASPEED_RESET_ADC>;
 		#io-channel-cells = <1>;
 		#io-channel-cells = <1>;
 	};
 	};

+ 7 - 0
Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt

@@ -17,6 +17,11 @@ Required properties:
   This property uses the IRQ edge types values: IRQ_TYPE_EDGE_RISING ,
   This property uses the IRQ edge types values: IRQ_TYPE_EDGE_RISING ,
   IRQ_TYPE_EDGE_FALLING or IRQ_TYPE_EDGE_BOTH
   IRQ_TYPE_EDGE_FALLING or IRQ_TYPE_EDGE_BOTH
 
 
+Optional properties:
+  - dmas: Phandle to dma channel for the ADC.
+  - dma-names: Must be "rx" when dmas property is being used.
+  See ../../dma/dma.txt for details.
+
 Example:
 Example:
 
 
 adc: adc@fc030000 {
 adc: adc@fc030000 {
@@ -31,4 +36,6 @@ adc: adc@fc030000 {
 	vddana-supply = <&vdd_3v3_lp_reg>;
 	vddana-supply = <&vdd_3v3_lp_reg>;
 	vref-supply = <&vdd_3v3_lp_reg>;
 	vref-supply = <&vdd_3v3_lp_reg>;
 	atmel,trigger-edge-type = <IRQ_TYPE_EDGE_BOTH>;
 	atmel,trigger-edge-type = <IRQ_TYPE_EDGE_BOTH>;
+	dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(25))>;
+	dma-names = "rx";
 }
 }

+ 24 - 0
Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt

@@ -62,6 +62,15 @@ Required properties:
 - st,adc-channels: List of single-ended channels muxed for this ADC.
 - st,adc-channels: List of single-ended channels muxed for this ADC.
   It can have up to 16 channels on stm32f4 or 20 channels on stm32h7, numbered
   It can have up to 16 channels on stm32f4 or 20 channels on stm32h7, numbered
   from 0 to 15 or 19 (resp. for in0..in15 or in0..in19).
   from 0 to 15 or 19 (resp. for in0..in15 or in0..in19).
+- st,adc-diff-channels: List of differential channels muxed for this ADC.
+  Depending on part used, some channels can be configured as differential
+  instead of single-ended (e.g. stm32h7). List here positive and negative
+  inputs pairs as <vinp vinn>, <vinp vinn>,... vinp and vinn are numbered
+  from 0 to 19 on stm32h7)
+  Note: At least one of "st,adc-channels" or "st,adc-diff-channels" is required.
+  Both properties can be used together. Some channels can be used as
+  single-ended and some other ones as differential (mixed). But channels
+  can't be configured both as single-ended and differential (invalid).
 - #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
 - #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
   Documentation/devicetree/bindings/iio/iio-bindings.txt
   Documentation/devicetree/bindings/iio/iio-bindings.txt
 
 
@@ -111,3 +120,18 @@ Example:
 		...
 		...
 		other adc child nodes follow...
 		other adc child nodes follow...
 	};
 	};
+
+Example to setup:
+- channel 1 as single-ended
+- channels 2 & 3 as differential (with resp. 6 & 7 negative inputs)
+
+	adc: adc@40022000 {
+		compatible = "st,stm32h7-adc-core";
+		...
+		adc1: adc@0 {
+			compatible = "st,stm32h7-adc";
+			...
+			st,adc-channels = <1>;
+			st,adc-diff-channels = <2 6>, <3 7>;
+		};
+	};

+ 6 - 2
Documentation/devicetree/bindings/iio/health/max30102.txt

@@ -1,9 +1,11 @@
 Maxim MAX30102 heart rate and pulse oximeter sensor
 Maxim MAX30102 heart rate and pulse oximeter sensor
+Maxim MAX30105 optical particle-sensing module
 
 
 * https://datasheets.maximintegrated.com/en/ds/MAX30102.pdf
 * https://datasheets.maximintegrated.com/en/ds/MAX30102.pdf
+* https://datasheets.maximintegrated.com/en/ds/MAX30105.pdf
 
 
 Required properties:
 Required properties:
-  - compatible: must be "maxim,max30102"
+  - compatible: must be "maxim,max30102" or "maxim,max30105"
   - reg: the I2C address of the sensor
   - reg: the I2C address of the sensor
   - interrupt-parent: should be the phandle for the interrupt controller
   - interrupt-parent: should be the phandle for the interrupt controller
   - interrupts: the sole interrupt generated by the device
   - interrupts: the sole interrupt generated by the device
@@ -12,8 +14,10 @@ Required properties:
   interrupt client node bindings.
   interrupt client node bindings.
 
 
 Optional properties:
 Optional properties:
-  - maxim,red-led-current-microamp: configuration for RED LED current
+  - maxim,red-led-current-microamp: configuration for red LED current
   - maxim,ir-led-current-microamp: configuration for IR LED current
   - maxim,ir-led-current-microamp: configuration for IR LED current
+  - maxim,green-led-current-microamp: configuration for green LED current
+    (max30105 only)
 
 
     Note that each step is approximately 200 microamps, ranging from 0 uA to
     Note that each step is approximately 200 microamps, ranging from 0 uA to
     50800 uA.
     50800 uA.

+ 23 - 0
Documentation/devicetree/bindings/iio/light/uvis25.txt

@@ -0,0 +1,23 @@
+* ST UVIS25 uv sensor
+
+Required properties:
+- compatible: should be "st,uvis25"
+- reg: i2c address of the sensor / spi cs line
+
+Optional properties:
+- interrupt-parent: should be the phandle for the interrupt controller
+- interrupts: interrupt mapping for IRQ. It should be configured with
+  flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
+  IRQ_TYPE_EDGE_FALLING.
+
+  Refer to interrupt-controller/interrupts.txt for generic interrupt
+  client node bindings.
+
+Example:
+
+uvis25@47 {
+	compatible = "st,uvis25";
+	reg = <0x47>;
+	interrupt-parent = <&gpio0>;
+	interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+};

+ 13 - 6
MAINTAINERS

@@ -270,6 +270,7 @@ ACCES 104-QUAD-8 IIO DRIVER
 M:	William Breathitt Gray <vilhelm.gray@gmail.com>
 M:	William Breathitt Gray <vilhelm.gray@gmail.com>
 L:	linux-iio@vger.kernel.org
 L:	linux-iio@vger.kernel.org
 S:	Maintained
 S:	Maintained
+F:	Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
 F:	drivers/iio/counter/104-quad-8.c
 F:	drivers/iio/counter/104-quad-8.c
 
 
 ACCES PCI-IDIO-16 GPIO DRIVER
 ACCES PCI-IDIO-16 GPIO DRIVER
@@ -859,6 +860,8 @@ M:	Michael Hennerich <Michael.Hennerich@analog.com>
 W:	http://wiki.analog.com/
 W:	http://wiki.analog.com/
 W:	http://ez.analog.com/community/linux-device-drivers
 W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 S:	Supported
+F:	Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
+F:	Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
 F:	drivers/iio/*/ad*
 F:	drivers/iio/*/ad*
 F:	drivers/iio/adc/ltc2497*
 F:	drivers/iio/adc/ltc2497*
 X:	drivers/iio/*/adjd*
 X:	drivers/iio/*/adjd*
@@ -4145,6 +4148,7 @@ DEVANTECH SRF ULTRASONIC RANGER IIO DRIVER
 M:	Andreas Klinger <ak@it-klinger.de>
 M:	Andreas Klinger <ak@it-klinger.de>
 L:	linux-iio@vger.kernel.org
 L:	linux-iio@vger.kernel.org
 S:	Maintained
 S:	Maintained
+F:	Documentation/ABI/testing/sysfs-bus-iio-distance-srf08
 F:	drivers/iio/proximity/srf*.c
 F:	drivers/iio/proximity/srf*.c
 
 
 DEVICE COREDUMP (DEV_COREDUMP)
 DEVICE COREDUMP (DEV_COREDUMP)
@@ -6850,6 +6854,8 @@ R:	Peter Meerwald-Stadler <pmeerw@pmeerw.net>
 L:	linux-iio@vger.kernel.org
 L:	linux-iio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git
 S:	Maintained
 S:	Maintained
+F:	Documentation/ABI/testing/configfs-iio*
+F:	Documentation/ABI/testing/sysfs-bus-iio*
 F:	Documentation/devicetree/bindings/iio/
 F:	Documentation/devicetree/bindings/iio/
 F:	drivers/iio/
 F:	drivers/iio/
 F:	drivers/staging/iio/
 F:	drivers/staging/iio/
@@ -7331,17 +7337,16 @@ F:	drivers/tty/ipwireless/
 
 
 IPX NETWORK LAYER
 IPX NETWORK LAYER
 L:	netdev@vger.kernel.org
 L:	netdev@vger.kernel.org
-S:	Odd fixes
-F:	include/net/ipx.h
+S:	Obsolete
 F:	include/uapi/linux/ipx.h
 F:	include/uapi/linux/ipx.h
-F:	net/ipx/
+F:	drivers/staging/ipx/
 
 
 IRDA SUBSYSTEM
 IRDA SUBSYSTEM
 M:	Samuel Ortiz <samuel@sortiz.org>
 M:	Samuel Ortiz <samuel@sortiz.org>
 L:	irda-users@lists.sourceforge.net (subscribers-only)
 L:	irda-users@lists.sourceforge.net (subscribers-only)
 L:	netdev@vger.kernel.org
 L:	netdev@vger.kernel.org
 W:	http://irda.sourceforge.net/
 W:	http://irda.sourceforge.net/
-S:	Maintained
+S:	Obsolete
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
 F:	Documentation/networking/irda.txt
 F:	Documentation/networking/irda.txt
 F:	drivers/staging/irda/
 F:	drivers/staging/irda/
@@ -9408,8 +9413,8 @@ F:	drivers/net/ethernet/natsemi/natsemi.c
 
 
 NCP FILESYSTEM
 NCP FILESYSTEM
 M:	Petr Vandrovec <petr@vandrovec.name>
 M:	Petr Vandrovec <petr@vandrovec.name>
-S:	Odd Fixes
-F:	fs/ncpfs/
+S:	Obsolete
+F:	drivers/staging/ncpfs/
 
 
 NCR 5380 SCSI DRIVERS
 NCR 5380 SCSI DRIVERS
 M:	Finn Thain <fthain@telegraphics.com.au>
 M:	Finn Thain <fthain@telegraphics.com.au>
@@ -14119,6 +14124,8 @@ UNISYS S-PAR DRIVERS
 M:	David Kershner <david.kershner@unisys.com>
 M:	David Kershner <david.kershner@unisys.com>
 L:	sparmaintainer@unisys.com (Unisys internal)
 L:	sparmaintainer@unisys.com (Unisys internal)
 S:	Supported
 S:	Supported
+F:	include/linux/visorbus.h
+F:	drivers/visorbus/
 F:	drivers/staging/unisys/
 F:	drivers/staging/unisys/
 
 
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER

+ 2 - 0
drivers/Kconfig

@@ -211,4 +211,6 @@ source "drivers/mux/Kconfig"
 
 
 source "drivers/opp/Kconfig"
 source "drivers/opp/Kconfig"
 
 
+source "drivers/visorbus/Kconfig"
+
 endmenu
 endmenu

+ 1 - 0
drivers/Makefile

@@ -184,3 +184,4 @@ obj-$(CONFIG_FPGA)		+= fpga/
 obj-$(CONFIG_FSI)		+= fsi/
 obj-$(CONFIG_FSI)		+= fsi/
 obj-$(CONFIG_TEE)		+= tee/
 obj-$(CONFIG_TEE)		+= tee/
 obj-$(CONFIG_MULTIPLEXER)	+= mux/
 obj-$(CONFIG_MULTIPLEXER)	+= mux/
+obj-$(CONFIG_UNISYS_VISORBUS)	+= visorbus/

+ 12 - 0
drivers/iio/accel/bmc150-accel-i2c.c

@@ -81,9 +81,21 @@ static const struct i2c_device_id bmc150_accel_id[] = {
 
 
 MODULE_DEVICE_TABLE(i2c, bmc150_accel_id);
 MODULE_DEVICE_TABLE(i2c, bmc150_accel_id);
 
 
+static const struct of_device_id bmc150_accel_of_match[] = {
+	{ .compatible = "bosch,bmc150_accel" },
+	{ .compatible = "bosch,bmi055_accel" },
+	{ .compatible = "bosch,bma255" },
+	{ .compatible = "bosch,bma250e" },
+	{ .compatible = "bosch,bma222e" },
+	{ .compatible = "bosch,bma280" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, bmc150_accel_of_match);
+
 static struct i2c_driver bmc150_accel_driver = {
 static struct i2c_driver bmc150_accel_driver = {
 	.driver = {
 	.driver = {
 		.name	= "bmc150_accel_i2c",
 		.name	= "bmc150_accel_i2c",
+		.of_match_table = bmc150_accel_of_match,
 		.acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
 		.acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
 		.pm	= &bmc150_accel_pm_ops,
 		.pm	= &bmc150_accel_pm_ops,
 	},
 	},

+ 29 - 2
drivers/iio/accel/da280.c

@@ -11,6 +11,7 @@
 
 
 #include <linux/module.h>
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/i2c.h>
+#include <linux/acpi.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
 #include <linux/iio/sysfs.h>
 #include <linux/byteorder/generic.h>
 #include <linux/byteorder/generic.h>
@@ -25,7 +26,7 @@
 #define DA280_MODE_ENABLE		0x1e
 #define DA280_MODE_ENABLE		0x1e
 #define DA280_MODE_DISABLE		0x9e
 #define DA280_MODE_DISABLE		0x9e
 
 
-enum { da226, da280 };
+enum da280_chipset { da226, da280 };
 
 
 /*
 /*
  * a value of + or -4096 corresponds to + or - 1G
  * a value of + or -4096 corresponds to + or - 1G
@@ -91,12 +92,24 @@ static const struct iio_info da280_info = {
 	.read_raw	= da280_read_raw,
 	.read_raw	= da280_read_raw,
 };
 };
 
 
+static enum da280_chipset da280_match_acpi_device(struct device *dev)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (!id)
+		return -EINVAL;
+
+	return (enum da280_chipset) id->driver_data;
+}
+
 static int da280_probe(struct i2c_client *client,
 static int da280_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 			const struct i2c_device_id *id)
 {
 {
 	int ret;
 	int ret;
 	struct iio_dev *indio_dev;
 	struct iio_dev *indio_dev;
 	struct da280_data *data;
 	struct da280_data *data;
+	enum da280_chipset chip;
 
 
 	ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
 	ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
 	if (ret != DA280_CHIP_ID)
 	if (ret != DA280_CHIP_ID)
@@ -114,7 +127,14 @@ static int da280_probe(struct i2c_client *client,
 	indio_dev->info = &da280_info;
 	indio_dev->info = &da280_info;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 	indio_dev->channels = da280_channels;
 	indio_dev->channels = da280_channels;
-	if (id->driver_data == da226) {
+
+	if (ACPI_HANDLE(&client->dev)) {
+		chip = da280_match_acpi_device(&client->dev);
+	} else {
+		chip = id->driver_data;
+	}
+
+	if (chip == da226) {
 		indio_dev->name = "da226";
 		indio_dev->name = "da226";
 		indio_dev->num_channels = 2;
 		indio_dev->num_channels = 2;
 	} else {
 	} else {
@@ -158,6 +178,12 @@ static int da280_resume(struct device *dev)
 
 
 static SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
 static SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
 
 
+static const struct acpi_device_id da280_acpi_match[] = {
+	{"MIRAACC", da280},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, da280_acpi_match);
+
 static const struct i2c_device_id da280_i2c_id[] = {
 static const struct i2c_device_id da280_i2c_id[] = {
 	{ "da226", da226 },
 	{ "da226", da226 },
 	{ "da280", da280 },
 	{ "da280", da280 },
@@ -168,6 +194,7 @@ MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
 static struct i2c_driver da280_driver = {
 static struct i2c_driver da280_driver = {
 	.driver = {
 	.driver = {
 		.name = "da280",
 		.name = "da280",
+		.acpi_match_table = ACPI_PTR(da280_acpi_match),
 		.pm = &da280_pm_ops,
 		.pm = &da280_pm_ops,
 	},
 	},
 	.probe		= da280_probe,
 	.probe		= da280_probe,

+ 3 - 0
drivers/iio/accel/kxsd9-i2c.c

@@ -63,3 +63,6 @@ static struct i2c_driver kxsd9_i2c_driver = {
 	.id_table	= kxsd9_i2c_id,
 	.id_table	= kxsd9_i2c_id,
 };
 };
 module_i2c_driver(kxsd9_i2c_driver);
 module_i2c_driver(kxsd9_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("KXSD9 accelerometer I2C interface");

+ 9 - 11
drivers/iio/accel/mma8452.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * mma8452.c - Support for following Freescale / NXP 3-axis accelerometers:
  * mma8452.c - Support for following Freescale / NXP 3-axis accelerometers:
  *
  *
@@ -13,9 +14,6 @@
  * Copyright 2015 Martin Kepplinger <martink@posteo.de>
  * Copyright 2015 Martin Kepplinger <martink@posteo.de>
  * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
  * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
  *
  *
- * This file is subject to the terms and conditions of version 2 of
- * the GNU General Public License.  See the file COPYING in the main
- * directory of this archive for more details.
  *
  *
  * TODO: orientation events
  * TODO: orientation events
  */
  */
@@ -135,7 +133,7 @@ struct mma8452_event_regs {
 		u8 ev_count;
 		u8 ev_count;
 };
 };
 
 
-static const struct mma8452_event_regs ev_regs_accel_falling = {
+static const struct mma8452_event_regs ff_mt_ev_regs = {
 		.ev_cfg = MMA8452_FF_MT_CFG,
 		.ev_cfg = MMA8452_FF_MT_CFG,
 		.ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
 		.ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
 		.ev_cfg_chan_shift = MMA8452_FF_MT_CHAN_SHIFT,
 		.ev_cfg_chan_shift = MMA8452_FF_MT_CHAN_SHIFT,
@@ -145,7 +143,7 @@ static const struct mma8452_event_regs ev_regs_accel_falling = {
 		.ev_count = MMA8452_FF_MT_COUNT
 		.ev_count = MMA8452_FF_MT_COUNT
 };
 };
 
 
-static const struct mma8452_event_regs ev_regs_accel_rising = {
+static const struct mma8452_event_regs trans_ev_regs = {
 		.ev_cfg = MMA8452_TRANSIENT_CFG,
 		.ev_cfg = MMA8452_TRANSIENT_CFG,
 		.ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
 		.ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
 		.ev_cfg_chan_shift = MMA8452_TRANSIENT_CHAN_SHIFT,
 		.ev_cfg_chan_shift = MMA8452_TRANSIENT_CHAN_SHIFT,
@@ -284,7 +282,7 @@ static const int mma8452_samp_freq[8][2] = {
 };
 };
 
 
 /* Datasheet table: step time "Relationship with the ODR" (sample frequency) */
 /* Datasheet table: step time "Relationship with the ODR" (sample frequency) */
-static const unsigned int mma8452_transient_time_step_us[4][8] = {
+static const unsigned int mma8452_time_step_us[4][8] = {
 	{ 1250, 2500, 5000, 10000, 20000, 20000, 20000, 20000 },  /* normal */
 	{ 1250, 2500, 5000, 10000, 20000, 20000, 20000, 20000 },  /* normal */
 	{ 1250, 2500, 5000, 10000, 20000, 80000, 80000, 80000 },  /* l p l n */
 	{ 1250, 2500, 5000, 10000, 20000, 80000, 80000, 80000 },  /* l p l n */
 	{ 1250, 2500, 2500, 2500, 2500, 2500, 2500, 2500 },	  /* high res*/
 	{ 1250, 2500, 2500, 2500, 2500, 2500, 2500, 2500 },	  /* high res*/
@@ -777,12 +775,12 @@ static int mma8452_get_event_regs(struct mma8452_data *data,
 					& MMA8452_INT_TRANS) &&
 					& MMA8452_INT_TRANS) &&
 				(data->chip_info->enabled_events
 				(data->chip_info->enabled_events
 					& MMA8452_INT_TRANS))
 					& MMA8452_INT_TRANS))
-				*ev_reg = &ev_regs_accel_rising;
+				*ev_reg = &trans_ev_regs;
 			else
 			else
-				*ev_reg = &ev_regs_accel_falling;
+				*ev_reg = &ff_mt_ev_regs;
 			return 0;
 			return 0;
 		case IIO_EV_DIR_FALLING:
 		case IIO_EV_DIR_FALLING:
-			*ev_reg = &ev_regs_accel_falling;
+			*ev_reg = &ff_mt_ev_regs;
 			return 0;
 			return 0;
 		default:
 		default:
 			return -EINVAL;
 			return -EINVAL;
@@ -826,7 +824,7 @@ static int mma8452_read_event_value(struct iio_dev *indio_dev,
 		if (power_mode < 0)
 		if (power_mode < 0)
 			return power_mode;
 			return power_mode;
 
 
-		us = ret * mma8452_transient_time_step_us[power_mode][
+		us = ret * mma8452_time_step_us[power_mode][
 				mma8452_get_odr_index(data)];
 				mma8452_get_odr_index(data)];
 		*val = us / USEC_PER_SEC;
 		*val = us / USEC_PER_SEC;
 		*val2 = us % USEC_PER_SEC;
 		*val2 = us % USEC_PER_SEC;
@@ -883,7 +881,7 @@ static int mma8452_write_event_value(struct iio_dev *indio_dev,
 			return ret;
 			return ret;
 
 
 		steps = (val * USEC_PER_SEC + val2) /
 		steps = (val * USEC_PER_SEC + val2) /
-				mma8452_transient_time_step_us[ret][
+				mma8452_time_step_us[ret][
 					mma8452_get_odr_index(data)];
 					mma8452_get_odr_index(data)];
 
 
 		if (steps < 0 || steps > 0xff)
 		if (steps < 0 || steps > 0xff)

+ 0 - 5
drivers/iio/accel/st_accel_core.c

@@ -920,8 +920,6 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
 int st_accel_common_probe(struct iio_dev *indio_dev)
 int st_accel_common_probe(struct iio_dev *indio_dev)
 {
 {
 	struct st_sensor_data *adata = iio_priv(indio_dev);
 	struct st_sensor_data *adata = iio_priv(indio_dev);
-	struct st_sensors_platform_data *pdata =
-		(struct st_sensors_platform_data *)adata->dev->platform_data;
 	int irq = adata->get_irq_data_ready(indio_dev);
 	int irq = adata->get_irq_data_ready(indio_dev);
 	int err;
 	int err;
 
 
@@ -948,9 +946,6 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
 					&adata->sensor_settings->fs.fs_avl[0];
 					&adata->sensor_settings->fs.fs_avl[0];
 	adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
 	adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
 
 
-	if (!pdata)
-		pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
-
 	err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
 	err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
 	if (err < 0)
 	if (err < 0)
 		goto st_accel_power_off;
 		goto st_accel_power_off;

+ 3 - 0
drivers/iio/adc/Kconfig

@@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
 	tristate "Atmel AT91 SAMA5D2 ADC"
 	tristate "Atmel AT91 SAMA5D2 ADC"
 	depends on ARCH_AT91 || COMPILE_TEST
 	depends on ARCH_AT91 || COMPILE_TEST
 	depends on HAS_IOMEM
 	depends on HAS_IOMEM
+	depends on HAS_DMA
 	select IIO_TRIGGERED_BUFFER
 	select IIO_TRIGGERED_BUFFER
 	help
 	help
 	  Say yes here to build support for Atmel SAMA5D2 ADC which is
 	  Say yes here to build support for Atmel SAMA5D2 ADC which is
@@ -318,6 +319,8 @@ config HI8435
 config HX711
 config HX711
 	tristate "AVIA HX711 ADC for weight cells"
 	tristate "AVIA HX711 ADC for weight cells"
 	depends on GPIOLIB
 	depends on GPIOLIB
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	help
 	  If you say yes here you get support for AVIA HX711 ADC which is used
 	  If you say yes here you get support for AVIA HX711 ADC which is used
 	  for weigh cells
 	  for weigh cells

+ 19 - 6
drivers/iio/adc/aspeed_adc.c

@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
+#include <linux/reset.h>
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/types.h>
 
 
@@ -53,11 +54,12 @@ struct aspeed_adc_model_data {
 };
 };
 
 
 struct aspeed_adc_data {
 struct aspeed_adc_data {
-	struct device	*dev;
-	void __iomem	*base;
-	spinlock_t	clk_lock;
-	struct clk_hw	*clk_prescaler;
-	struct clk_hw	*clk_scaler;
+	struct device		*dev;
+	void __iomem		*base;
+	spinlock_t		clk_lock;
+	struct clk_hw		*clk_prescaler;
+	struct clk_hw		*clk_scaler;
+	struct reset_control	*rst;
 };
 };
 
 
 #define ASPEED_CHAN(_idx, _data_reg_addr) {			\
 #define ASPEED_CHAN(_idx, _data_reg_addr) {			\
@@ -217,6 +219,15 @@ static int aspeed_adc_probe(struct platform_device *pdev)
 		goto scaler_error;
 		goto scaler_error;
 	}
 	}
 
 
+	data->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+	if (IS_ERR(data->rst)) {
+		dev_err(&pdev->dev,
+			"invalid or missing reset controller device tree entry");
+		ret = PTR_ERR(data->rst);
+		goto reset_error;
+	}
+	reset_control_deassert(data->rst);
+
 	model_data = of_device_get_match_data(&pdev->dev);
 	model_data = of_device_get_match_data(&pdev->dev);
 
 
 	if (model_data->wait_init_sequence) {
 	if (model_data->wait_init_sequence) {
@@ -263,9 +274,10 @@ iio_register_error:
 	writel(ASPEED_OPERATION_MODE_POWER_DOWN,
 	writel(ASPEED_OPERATION_MODE_POWER_DOWN,
 		data->base + ASPEED_REG_ENGINE_CONTROL);
 		data->base + ASPEED_REG_ENGINE_CONTROL);
 	clk_disable_unprepare(data->clk_scaler->clk);
 	clk_disable_unprepare(data->clk_scaler->clk);
+reset_error:
+	reset_control_assert(data->rst);
 clk_enable_error:
 clk_enable_error:
 	clk_hw_unregister_divider(data->clk_scaler);
 	clk_hw_unregister_divider(data->clk_scaler);
-
 scaler_error:
 scaler_error:
 	clk_hw_unregister_divider(data->clk_prescaler);
 	clk_hw_unregister_divider(data->clk_prescaler);
 	return ret;
 	return ret;
@@ -280,6 +292,7 @@ static int aspeed_adc_remove(struct platform_device *pdev)
 	writel(ASPEED_OPERATION_MODE_POWER_DOWN,
 	writel(ASPEED_OPERATION_MODE_POWER_DOWN,
 		data->base + ASPEED_REG_ENGINE_CONTROL);
 		data->base + ASPEED_REG_ENGINE_CONTROL);
 	clk_disable_unprepare(data->clk_scaler->clk);
 	clk_disable_unprepare(data->clk_scaler->clk);
+	reset_control_assert(data->rst);
 	clk_hw_unregister_divider(data->clk_scaler);
 	clk_hw_unregister_divider(data->clk_scaler);
 	clk_hw_unregister_divider(data->clk_prescaler);
 	clk_hw_unregister_divider(data->clk_prescaler);
 
 

+ 436 - 20
drivers/iio/adc/at91-sama5d2_adc.c

@@ -16,6 +16,8 @@
 
 
 #include <linux/bitops.h>
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/module.h>
@@ -100,6 +102,8 @@
 #define AT91_SAMA5D2_LCDR	0x20
 #define AT91_SAMA5D2_LCDR	0x20
 /* Interrupt Enable Register */
 /* Interrupt Enable Register */
 #define AT91_SAMA5D2_IER	0x24
 #define AT91_SAMA5D2_IER	0x24
+/* Interrupt Enable Register - general overrun error */
+#define AT91_SAMA5D2_IER_GOVRE BIT(25)
 /* Interrupt Disable Register */
 /* Interrupt Disable Register */
 #define AT91_SAMA5D2_IDR	0x28
 #define AT91_SAMA5D2_IDR	0x28
 /* Interrupt Mask Register */
 /* Interrupt Mask Register */
@@ -167,13 +171,19 @@
 
 
 /*
 /*
  * Maximum number of bytes to hold conversion from all channels
  * Maximum number of bytes to hold conversion from all channels
- * plus the timestamp
+ * without the timestamp.
  */
  */
-#define AT91_BUFFER_MAX_BYTES ((AT91_SAMA5D2_SINGLE_CHAN_CNT +		\
-				AT91_SAMA5D2_DIFF_CHAN_CNT) * 2 + 8)
+#define AT91_BUFFER_MAX_CONVERSION_BYTES ((AT91_SAMA5D2_SINGLE_CHAN_CNT + \
+					 AT91_SAMA5D2_DIFF_CHAN_CNT) * 2)
+
+/* This total must also include the timestamp */
+#define AT91_BUFFER_MAX_BYTES (AT91_BUFFER_MAX_CONVERSION_BYTES + 8)
 
 
 #define AT91_BUFFER_MAX_HWORDS (AT91_BUFFER_MAX_BYTES / 2)
 #define AT91_BUFFER_MAX_HWORDS (AT91_BUFFER_MAX_BYTES / 2)
 
 
+#define AT91_HWFIFO_MAX_SIZE_STR	"128"
+#define AT91_HWFIFO_MAX_SIZE		128
+
 #define AT91_SAMA5D2_CHAN_SINGLE(num, addr)				\
 #define AT91_SAMA5D2_CHAN_SINGLE(num, addr)				\
 	{								\
 	{								\
 		.type = IIO_VOLTAGE,					\
 		.type = IIO_VOLTAGE,					\
@@ -228,6 +238,28 @@ struct at91_adc_trigger {
 	bool				hw_trig;
 	bool				hw_trig;
 };
 };
 
 
+/**
+ * at91_adc_dma - at91-sama5d2 dma information struct
+ * @dma_chan:		the dma channel acquired
+ * @rx_buf:		dma coherent allocated area
+ * @rx_dma_buf:		dma handler for the buffer
+ * @phys_addr:		physical address of the ADC base register
+ * @buf_idx:		index inside the dma buffer where reading was last done
+ * @rx_buf_sz:		size of buffer used by DMA operation
+ * @watermark:		number of conversions to copy before DMA triggers irq
+ * @dma_ts:		hold the start timestamp of dma operation
+ */
+struct at91_adc_dma {
+	struct dma_chan			*dma_chan;
+	u8				*rx_buf;
+	dma_addr_t			rx_dma_buf;
+	phys_addr_t			phys_addr;
+	int				buf_idx;
+	int				rx_buf_sz;
+	int				watermark;
+	s64				dma_ts;
+};
+
 struct at91_adc_state {
 struct at91_adc_state {
 	void __iomem			*base;
 	void __iomem			*base;
 	int				irq;
 	int				irq;
@@ -242,6 +274,7 @@ struct at91_adc_state {
 	u32				conversion_value;
 	u32				conversion_value;
 	struct at91_adc_soc_info	soc_info;
 	struct at91_adc_soc_info	soc_info;
 	wait_queue_head_t		wq_data_available;
 	wait_queue_head_t		wq_data_available;
+	struct at91_adc_dma		dma_st;
 	u16				buffer[AT91_BUFFER_MAX_HWORDS];
 	u16				buffer[AT91_BUFFER_MAX_HWORDS];
 	/*
 	/*
 	 * lock to prevent concurrent 'single conversion' requests through
 	 * lock to prevent concurrent 'single conversion' requests through
@@ -322,11 +355,17 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 		if (state) {
 		if (state) {
 			at91_adc_writel(st, AT91_SAMA5D2_CHER,
 			at91_adc_writel(st, AT91_SAMA5D2_CHER,
 					BIT(chan->channel));
 					BIT(chan->channel));
-			at91_adc_writel(st, AT91_SAMA5D2_IER,
-					BIT(chan->channel));
+			/* enable irq only if not using DMA */
+			if (!st->dma_st.dma_chan) {
+				at91_adc_writel(st, AT91_SAMA5D2_IER,
+						BIT(chan->channel));
+			}
 		} else {
 		} else {
-			at91_adc_writel(st, AT91_SAMA5D2_IDR,
-					BIT(chan->channel));
+			/* disable irq only if not using DMA */
+			if (!st->dma_st.dma_chan) {
+				at91_adc_writel(st, AT91_SAMA5D2_IDR,
+						BIT(chan->channel));
+			}
 			at91_adc_writel(st, AT91_SAMA5D2_CHDR,
 			at91_adc_writel(st, AT91_SAMA5D2_CHDR,
 					BIT(chan->channel));
 					BIT(chan->channel));
 		}
 		}
@@ -340,6 +379,10 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
 	struct iio_dev *indio = iio_trigger_get_drvdata(trig);
 	struct iio_dev *indio = iio_trigger_get_drvdata(trig);
 	struct at91_adc_state *st = iio_priv(indio);
 	struct at91_adc_state *st = iio_priv(indio);
 
 
+	/* if we are using DMA, we must not reenable irq after each trigger */
+	if (st->dma_st.dma_chan)
+		return 0;
+
 	enable_irq(st->irq);
 	enable_irq(st->irq);
 
 
 	/* Needed to ACK the DRDY interruption */
 	/* Needed to ACK the DRDY interruption */
@@ -350,6 +393,153 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
 static const struct iio_trigger_ops at91_adc_trigger_ops = {
 static const struct iio_trigger_ops at91_adc_trigger_ops = {
 	.set_trigger_state = &at91_adc_configure_trigger,
 	.set_trigger_state = &at91_adc_configure_trigger,
 	.try_reenable = &at91_adc_reenable_trigger,
 	.try_reenable = &at91_adc_reenable_trigger,
+	.validate_device = iio_trigger_validate_own_device,
+};
+
+static int at91_adc_dma_size_done(struct at91_adc_state *st)
+{
+	struct dma_tx_state state;
+	enum dma_status status;
+	int i, size;
+
+	status = dmaengine_tx_status(st->dma_st.dma_chan,
+				     st->dma_st.dma_chan->cookie,
+				     &state);
+	if (status != DMA_IN_PROGRESS)
+		return 0;
+
+	/* Transferred length is size in bytes from end of buffer */
+	i = st->dma_st.rx_buf_sz - state.residue;
+
+	/* Return available bytes */
+	if (i >= st->dma_st.buf_idx)
+		size = i - st->dma_st.buf_idx;
+	else
+		size = st->dma_st.rx_buf_sz + i - st->dma_st.buf_idx;
+	return size;
+}
+
+static void at91_dma_buffer_done(void *data)
+{
+	struct iio_dev *indio_dev = data;
+
+	iio_trigger_poll_chained(indio_dev->trig);
+}
+
+static int at91_adc_dma_start(struct iio_dev *indio_dev)
+{
+	struct at91_adc_state *st = iio_priv(indio_dev);
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+	int ret;
+	u8 bit;
+
+	if (!st->dma_st.dma_chan)
+		return 0;
+
+	/* we start a new DMA, so set buffer index to start */
+	st->dma_st.buf_idx = 0;
+
+	/*
+	 * compute buffer size w.r.t. watermark and enabled channels.
+	 * scan_bytes is aligned so we need an exact size for DMA
+	 */
+	st->dma_st.rx_buf_sz = 0;
+
+	for_each_set_bit(bit, indio_dev->active_scan_mask,
+			 indio_dev->num_channels) {
+		struct iio_chan_spec const *chan = indio_dev->channels + bit;
+
+		st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
+	}
+	st->dma_st.rx_buf_sz *= st->dma_st.watermark;
+
+	/* Prepare a DMA cyclic transaction */
+	desc = dmaengine_prep_dma_cyclic(st->dma_st.dma_chan,
+					 st->dma_st.rx_dma_buf,
+					 st->dma_st.rx_buf_sz,
+					 st->dma_st.rx_buf_sz / 2,
+					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+
+	if (!desc) {
+		dev_err(&indio_dev->dev, "cannot prepare DMA cyclic\n");
+		return -EBUSY;
+	}
+
+	desc->callback = at91_dma_buffer_done;
+	desc->callback_param = indio_dev;
+
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(&indio_dev->dev, "cannot submit DMA cyclic\n");
+		dmaengine_terminate_async(st->dma_st.dma_chan);
+		return ret;
+	}
+
+	/* enable general overrun error signaling */
+	at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_GOVRE);
+	/* Issue pending DMA requests */
+	dma_async_issue_pending(st->dma_st.dma_chan);
+
+	/* consider current time as DMA start time for timestamps */
+	st->dma_st.dma_ts = iio_get_time_ns(indio_dev);
+
+	dev_dbg(&indio_dev->dev, "DMA cyclic started\n");
+
+	return 0;
+}
+
+static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
+{
+	int ret;
+
+	ret = at91_adc_dma_start(indio_dev);
+	if (ret) {
+		dev_err(&indio_dev->dev, "buffer postenable failed\n");
+		return ret;
+	}
+
+	return iio_triggered_buffer_postenable(indio_dev);
+}
+
+static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+{
+	struct at91_adc_state *st = iio_priv(indio_dev);
+	int ret;
+	u8 bit;
+
+	ret = iio_triggered_buffer_predisable(indio_dev);
+	if (ret < 0)
+		dev_err(&indio_dev->dev, "buffer predisable failed\n");
+
+	if (!st->dma_st.dma_chan)
+		return ret;
+
+	/* if we are using DMA we must clear registers and end DMA */
+	dmaengine_terminate_sync(st->dma_st.dma_chan);
+
+	/*
+	 * For each enabled channel we must read the last converted value
+	 * to clear EOC status and not get a possible interrupt later.
+	 * This value is being read by DMA from LCDR anyway
+	 */
+	for_each_set_bit(bit, indio_dev->active_scan_mask,
+			 indio_dev->num_channels) {
+		struct iio_chan_spec const *chan = indio_dev->channels + bit;
+
+		if (st->dma_st.dma_chan)
+			at91_adc_readl(st, chan->address);
+	}
+
+	/* read overflow register to clear possible overflow status */
+	at91_adc_readl(st, AT91_SAMA5D2_OVER);
+	return ret;
+}
+
+static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
+	.postenable = &at91_adc_buffer_postenable,
+	.predisable = &at91_adc_buffer_predisable,
 };
 };
 
 
 static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
 static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
@@ -388,24 +578,77 @@ static int at91_adc_trigger_init(struct iio_dev *indio)
 	return 0;
 	return 0;
 }
 }
 
 
-static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
+static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
+					   struct iio_poll_func *pf)
 {
 {
-	struct iio_poll_func *pf = p;
-	struct iio_dev *indio = pf->indio_dev;
-	struct at91_adc_state *st = iio_priv(indio);
+	struct at91_adc_state *st = iio_priv(indio_dev);
 	int i = 0;
 	int i = 0;
 	u8 bit;
 	u8 bit;
 
 
-	for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
-		struct iio_chan_spec const *chan = indio->channels + bit;
+	for_each_set_bit(bit, indio_dev->active_scan_mask,
+			 indio_dev->num_channels) {
+		struct iio_chan_spec const *chan = indio_dev->channels + bit;
 
 
 		st->buffer[i] = at91_adc_readl(st, chan->address);
 		st->buffer[i] = at91_adc_readl(st, chan->address);
 		i++;
 		i++;
 	}
 	}
+	iio_push_to_buffers_with_timestamp(indio_dev, st->buffer,
+					   pf->timestamp);
+}
 
 
-	iio_push_to_buffers_with_timestamp(indio, st->buffer, pf->timestamp);
+static void at91_adc_trigger_handler_dma(struct iio_dev *indio_dev)
+{
+	struct at91_adc_state *st = iio_priv(indio_dev);
+	int transferred_len = at91_adc_dma_size_done(st);
+	s64 ns = iio_get_time_ns(indio_dev);
+	s64 interval;
+	int sample_index = 0, sample_count, sample_size;
+
+	u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
+	/* if we reached this point, we cannot sample faster */
+	if (status & AT91_SAMA5D2_IER_GOVRE)
+		pr_info_ratelimited("%s: conversion overrun detected\n",
+				    indio_dev->name);
 
 
-	iio_trigger_notify_done(indio->trig);
+	sample_size = div_s64(st->dma_st.rx_buf_sz, st->dma_st.watermark);
+
+	sample_count = div_s64(transferred_len, sample_size);
+
+	/*
+	 * interval between samples is total time since last transfer handling
+	 * divided by the number of samples (total size divided by sample size)
+	 */
+	interval = div_s64((ns - st->dma_st.dma_ts), sample_count);
+
+	while (transferred_len >= sample_size) {
+		iio_push_to_buffers_with_timestamp(indio_dev,
+				(st->dma_st.rx_buf + st->dma_st.buf_idx),
+				(st->dma_st.dma_ts + interval * sample_index));
+		/* adjust remaining length */
+		transferred_len -= sample_size;
+		/* adjust buffer index */
+		st->dma_st.buf_idx += sample_size;
+		/* in case of reaching end of buffer, reset index */
+		if (st->dma_st.buf_idx >= st->dma_st.rx_buf_sz)
+			st->dma_st.buf_idx = 0;
+		sample_index++;
+	}
+	/* adjust saved time for next transfer handling */
+	st->dma_st.dma_ts = iio_get_time_ns(indio_dev);
+}
+
+static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct at91_adc_state *st = iio_priv(indio_dev);
+
+	if (st->dma_st.dma_chan)
+		at91_adc_trigger_handler_dma(indio_dev);
+	else
+		at91_adc_trigger_handler_nodma(indio_dev, pf);
+
+	iio_trigger_notify_done(indio_dev->trig);
 
 
 	return IRQ_HANDLED;
 	return IRQ_HANDLED;
 }
 }
@@ -414,7 +657,7 @@ static int at91_adc_buffer_init(struct iio_dev *indio)
 {
 {
 	return devm_iio_triggered_buffer_setup(&indio->dev, indio,
 	return devm_iio_triggered_buffer_setup(&indio->dev, indio,
 			&iio_pollfunc_store_time,
 			&iio_pollfunc_store_time,
-			&at91_adc_trigger_handler, NULL);
+			&at91_adc_trigger_handler, &at91_buffer_setup_ops);
 }
 }
 
 
 static unsigned at91_adc_startup_time(unsigned startup_time_min,
 static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -485,10 +728,13 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
 	if (!(status & imr))
 	if (!(status & imr))
 		return IRQ_NONE;
 		return IRQ_NONE;
 
 
-	if (iio_buffer_enabled(indio)) {
+	if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
 		disable_irq_nosync(irq);
 		disable_irq_nosync(irq);
 		iio_trigger_poll(indio->trig);
 		iio_trigger_poll(indio->trig);
-	} else {
+	} else if (iio_buffer_enabled(indio) && st->dma_st.dma_chan) {
+		disable_irq_nosync(irq);
+		WARN(true, "Unexpected irq occurred\n");
+	} else if (!iio_buffer_enabled(indio)) {
 		st->conversion_value = at91_adc_readl(st, st->chan->address);
 		st->conversion_value = at91_adc_readl(st, st->chan->address);
 		st->conversion_done = true;
 		st->conversion_done = true;
 		wake_up_interruptible(&st->wq_data_available);
 		wake_up_interruptible(&st->wq_data_available);
@@ -510,7 +756,6 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
 		ret = iio_device_claim_direct_mode(indio_dev);
 		ret = iio_device_claim_direct_mode(indio_dev);
 		if (ret)
 		if (ret)
 			return ret;
 			return ret;
-
 		mutex_lock(&st->lock);
 		mutex_lock(&st->lock);
 
 
 		st->chan = chan;
 		st->chan = chan;
@@ -541,6 +786,9 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
 		at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
 		at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
 		at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
 		at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
 
 
+		/* Needed to ACK the DRDY interruption */
+		at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+
 		mutex_unlock(&st->lock);
 		mutex_unlock(&st->lock);
 
 
 		iio_device_release_direct_mode(indio_dev);
 		iio_device_release_direct_mode(indio_dev);
@@ -580,9 +828,123 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
 	return 0;
 	return 0;
 }
 }
 
 
+static void at91_adc_dma_init(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct at91_adc_state *st = iio_priv(indio_dev);
+	struct dma_slave_config config = {0};
+	/*
+	 * We make the buffer double the size of the fifo,
+	 * such that DMA uses one half of the buffer (full fifo size)
+	 * and the software uses the other half to read/write.
+	 */
+	unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
+					  AT91_BUFFER_MAX_CONVERSION_BYTES * 2,
+					  PAGE_SIZE);
+
+	if (st->dma_st.dma_chan)
+		return;
+
+	st->dma_st.dma_chan = dma_request_slave_channel(&pdev->dev, "rx");
+
+	if (!st->dma_st.dma_chan)  {
+		dev_info(&pdev->dev, "can't get DMA channel\n");
+		goto dma_exit;
+	}
+
+	st->dma_st.rx_buf = dma_alloc_coherent(st->dma_st.dma_chan->device->dev,
+					       pages * PAGE_SIZE,
+					       &st->dma_st.rx_dma_buf,
+					       GFP_KERNEL);
+	if (!st->dma_st.rx_buf) {
+		dev_info(&pdev->dev, "can't allocate coherent DMA area\n");
+		goto dma_chan_disable;
+	}
+
+	/* Configure DMA channel to read data register */
+	config.direction = DMA_DEV_TO_MEM;
+	config.src_addr = (phys_addr_t)(st->dma_st.phys_addr
+			  + AT91_SAMA5D2_LCDR);
+	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	config.src_maxburst = 1;
+	config.dst_maxburst = 1;
+
+	if (dmaengine_slave_config(st->dma_st.dma_chan, &config)) {
+		dev_info(&pdev->dev, "can't configure DMA slave\n");
+		goto dma_free_area;
+	}
+
+	dev_info(&pdev->dev, "using %s for rx DMA transfers\n",
+		 dma_chan_name(st->dma_st.dma_chan));
+
+	return;
+
+dma_free_area:
+	dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
+			  st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
+dma_chan_disable:
+	dma_release_channel(st->dma_st.dma_chan);
+	st->dma_st.dma_chan = 0;
+dma_exit:
+	dev_info(&pdev->dev, "continuing without DMA support\n");
+}
+
+static void at91_adc_dma_disable(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct at91_adc_state *st = iio_priv(indio_dev);
+	unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
+					  AT91_BUFFER_MAX_CONVERSION_BYTES * 2,
+					  PAGE_SIZE);
+
+	/* if we are not using DMA, just return */
+	if (!st->dma_st.dma_chan)
+		return;
+
+	/* wait for all transactions to be terminated first*/
+	dmaengine_terminate_sync(st->dma_st.dma_chan);
+
+	dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
+			  st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
+	dma_release_channel(st->dma_st.dma_chan);
+	st->dma_st.dma_chan = 0;
+
+	dev_info(&pdev->dev, "continuing without DMA support\n");
+}
+
+static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+{
+	struct at91_adc_state *st = iio_priv(indio_dev);
+
+	if (val > AT91_HWFIFO_MAX_SIZE)
+		return -EINVAL;
+
+	if (!st->selected_trig->hw_trig) {
+		dev_dbg(&indio_dev->dev, "we need hw trigger for DMA\n");
+		return 0;
+	}
+
+	dev_dbg(&indio_dev->dev, "new watermark is %u\n", val);
+	st->dma_st.watermark = val;
+
+	/*
+	 * The logic here is: if we have watermark 1, it means we do
+	 * each conversion with it's own IRQ, thus we don't need DMA.
+	 * If the watermark is higher, we do DMA to do all the transfers in bulk
+	 */
+
+	if (val == 1)
+		at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
+	else if (val > 1)
+		at91_adc_dma_init(to_platform_device(&indio_dev->dev));
+
+	return 0;
+}
+
 static const struct iio_info at91_adc_info = {
 static const struct iio_info at91_adc_info = {
 	.read_raw = &at91_adc_read_raw,
 	.read_raw = &at91_adc_read_raw,
 	.write_raw = &at91_adc_write_raw,
 	.write_raw = &at91_adc_write_raw,
+	.hwfifo_set_watermark = &at91_adc_set_watermark,
 };
 };
 
 
 static void at91_adc_hw_init(struct at91_adc_state *st)
 static void at91_adc_hw_init(struct at91_adc_state *st)
@@ -599,6 +961,42 @@ static void at91_adc_hw_init(struct at91_adc_state *st)
 	at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
 	at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
 }
 }
 
 
+static ssize_t at91_adc_get_fifo_state(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev =
+			platform_get_drvdata(to_platform_device(dev));
+	struct at91_adc_state *st = iio_priv(indio_dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", !!st->dma_st.dma_chan);
+}
+
+static ssize_t at91_adc_get_watermark(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev =
+			platform_get_drvdata(to_platform_device(dev));
+	struct at91_adc_state *st = iio_priv(indio_dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark);
+}
+
+static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
+		       at91_adc_get_fifo_state, NULL, 0);
+static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
+		       at91_adc_get_watermark, NULL, 0);
+
+static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
+static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);
+
+static const struct attribute *at91_adc_fifo_attributes[] = {
+	&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
+	&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
+	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
+	NULL,
+};
+
 static int at91_adc_probe(struct platform_device *pdev)
 static int at91_adc_probe(struct platform_device *pdev)
 {
 {
 	struct iio_dev *indio_dev;
 	struct iio_dev *indio_dev;
@@ -674,6 +1072,9 @@ static int at91_adc_probe(struct platform_device *pdev)
 	if (!res)
 	if (!res)
 		return -EINVAL;
 		return -EINVAL;
 
 
+	/* if we plan to use DMA, we need the physical address of the regs */
+	st->dma_st.phys_addr = res->start;
+
 	st->base = devm_ioremap_resource(&pdev->dev, res);
 	st->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(st->base))
 	if (IS_ERR(st->base))
 		return PTR_ERR(st->base);
 		return PTR_ERR(st->base);
@@ -737,11 +1138,22 @@ static int at91_adc_probe(struct platform_device *pdev)
 			dev_err(&pdev->dev, "couldn't setup the triggers.\n");
 			dev_err(&pdev->dev, "couldn't setup the triggers.\n");
 			goto per_clk_disable_unprepare;
 			goto per_clk_disable_unprepare;
 		}
 		}
+		/*
+		 * Initially the iio buffer has a length of 2 and
+		 * a watermark of 1
+		 */
+		st->dma_st.watermark = 1;
+
+		iio_buffer_set_attrs(indio_dev->buffer,
+				     at91_adc_fifo_attributes);
 	}
 	}
 
 
+	if (dma_coerce_mask_and_coherent(&indio_dev->dev, DMA_BIT_MASK(32)))
+		dev_info(&pdev->dev, "cannot set DMA mask to 32-bit\n");
+
 	ret = iio_device_register(indio_dev);
 	ret = iio_device_register(indio_dev);
 	if (ret < 0)
 	if (ret < 0)
-		goto per_clk_disable_unprepare;
+		goto dma_disable;
 
 
 	if (st->selected_trig->hw_trig)
 	if (st->selected_trig->hw_trig)
 		dev_info(&pdev->dev, "setting up trigger as %s\n",
 		dev_info(&pdev->dev, "setting up trigger as %s\n",
@@ -752,6 +1164,8 @@ static int at91_adc_probe(struct platform_device *pdev)
 
 
 	return 0;
 	return 0;
 
 
+dma_disable:
+	at91_adc_dma_disable(pdev);
 per_clk_disable_unprepare:
 per_clk_disable_unprepare:
 	clk_disable_unprepare(st->per_clk);
 	clk_disable_unprepare(st->per_clk);
 vref_disable:
 vref_disable:
@@ -768,6 +1182,8 @@ static int at91_adc_remove(struct platform_device *pdev)
 
 
 	iio_device_unregister(indio_dev);
 	iio_device_unregister(indio_dev);
 
 
+	at91_adc_dma_disable(pdev);
+
 	clk_disable_unprepare(st->per_clk);
 	clk_disable_unprepare(st->per_clk);
 
 
 	regulator_disable(st->vref);
 	regulator_disable(st->vref);

+ 2 - 2
drivers/iio/adc/at91_adc.c

@@ -1177,9 +1177,9 @@ static int at91_adc_probe(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 
 	st->reg_base = devm_ioremap_resource(&pdev->dev, res);
 	st->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(st->reg_base)) {
+	if (IS_ERR(st->reg_base))
 		return PTR_ERR(st->reg_base);
 		return PTR_ERR(st->reg_base);
-	}
+
 
 
 	/*
 	/*
 	 * Disable all IRQs before setting up the handler
 	 * Disable all IRQs before setting up the handler

+ 6 - 14
drivers/iio/adc/axp288_adc.c

@@ -92,22 +92,14 @@ static const struct iio_chan_spec axp288_adc_channels[] = {
 	},
 	},
 };
 };
 
 
-#define AXP288_ADC_MAP(_adc_channel_label, _consumer_dev_name,	\
-		_consumer_channel)				\
-	{							\
-		.adc_channel_label = _adc_channel_label,	\
-		.consumer_dev_name = _consumer_dev_name,	\
-		.consumer_channel = _consumer_channel,		\
-	}
-
 /* for consumer drivers */
 /* for consumer drivers */
 static struct iio_map axp288_adc_default_maps[] = {
 static struct iio_map axp288_adc_default_maps[] = {
-	AXP288_ADC_MAP("TS_PIN", "axp288-batt", "axp288-batt-temp"),
-	AXP288_ADC_MAP("PMIC_TEMP", "axp288-pmic", "axp288-pmic-temp"),
-	AXP288_ADC_MAP("GPADC", "axp288-gpadc", "axp288-system-temp"),
-	AXP288_ADC_MAP("BATT_CHG_I", "axp288-chrg", "axp288-chrg-curr"),
-	AXP288_ADC_MAP("BATT_DISCHRG_I", "axp288-chrg", "axp288-chrg-d-curr"),
-	AXP288_ADC_MAP("BATT_V", "axp288-batt", "axp288-batt-volt"),
+	IIO_MAP("TS_PIN", "axp288-batt", "axp288-batt-temp"),
+	IIO_MAP("PMIC_TEMP", "axp288-pmic", "axp288-pmic-temp"),
+	IIO_MAP("GPADC", "axp288-gpadc", "axp288-system-temp"),
+	IIO_MAP("BATT_CHG_I", "axp288-chrg", "axp288-chrg-curr"),
+	IIO_MAP("BATT_DISCHRG_I", "axp288-chrg", "axp288-chrg-d-curr"),
+	IIO_MAP("BATT_V", "axp288-batt", "axp288-batt-volt"),
 	{},
 	{},
 };
 };
 
 

+ 104 - 30
drivers/iio/adc/hx711.c

@@ -24,6 +24,9 @@
 #include <linux/delay.h>
 #include <linux/delay.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
 #include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
 #include <linux/gpio/consumer.h>
 #include <linux/gpio/consumer.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/consumer.h>
 
 
@@ -89,6 +92,11 @@ struct hx711_data {
 	int			gain_set;	/* gain set on device */
 	int			gain_set;	/* gain set on device */
 	int			gain_chan_a;	/* gain for channel A */
 	int			gain_chan_a;	/* gain for channel A */
 	struct mutex		lock;
 	struct mutex		lock;
+	/*
+	 * triggered buffer
+	 * 2x32-bit channel + 64-bit timestamp
+	 */
+	u32			buffer[4];
 };
 };
 
 
 static int hx711_cycle(struct hx711_data *hx711_data)
 static int hx711_cycle(struct hx711_data *hx711_data)
@@ -145,15 +153,16 @@ static int hx711_wait_for_ready(struct hx711_data *hx711_data)
 	int i, val;
 	int i, val;
 
 
 	/*
 	/*
-	 * a maximum reset cycle time of 56 ms was measured.
-	 * we round it up to 100 ms
+	 * in some rare cases the reset takes quite a long time
+	 * especially when the channel is changed.
+	 * Allow up to one second for it
 	 */
 	 */
 	for (i = 0; i < 100; i++) {
 	for (i = 0; i < 100; i++) {
 		val = gpiod_get_value(hx711_data->gpiod_dout);
 		val = gpiod_get_value(hx711_data->gpiod_dout);
 		if (!val)
 		if (!val)
 			break;
 			break;
-		/* sleep at least 1 ms */
-		msleep(1);
+		/* sleep at least 10 ms */
+		msleep(10);
 	}
 	}
 	if (val)
 	if (val)
 		return -EIO;
 		return -EIO;
@@ -195,9 +204,7 @@ static int hx711_reset(struct hx711_data *hx711_data)
 		 * after a dummy read we need to wait vor readiness
 		 * after a dummy read we need to wait vor readiness
 		 * for not mixing gain pulses with the clock
 		 * for not mixing gain pulses with the clock
 		 */
 		 */
-		ret = hx711_wait_for_ready(hx711_data);
-		if (ret)
-			return ret;
+		val = hx711_wait_for_ready(hx711_data);
 	}
 	}
 
 
 	return val;
 	return val;
@@ -236,34 +243,40 @@ static int hx711_set_gain_for_channel(struct hx711_data *hx711_data, int chan)
 	return 0;
 	return 0;
 }
 }
 
 
+static int hx711_reset_read(struct hx711_data *hx711_data, int chan)
+{
+	int ret;
+	int val;
+
+	/*
+	 * hx711_reset() must be called from here
+	 * because it could be calling hx711_read() by itself
+	 */
+	if (hx711_reset(hx711_data)) {
+		dev_err(hx711_data->dev, "reset failed!");
+		return -EIO;
+	}
+
+	ret = hx711_set_gain_for_channel(hx711_data, chan);
+	if (ret < 0)
+		return ret;
+
+	val = hx711_read(hx711_data);
+
+	return val;
+}
+
 static int hx711_read_raw(struct iio_dev *indio_dev,
 static int hx711_read_raw(struct iio_dev *indio_dev,
 				const struct iio_chan_spec *chan,
 				const struct iio_chan_spec *chan,
 				int *val, int *val2, long mask)
 				int *val, int *val2, long mask)
 {
 {
 	struct hx711_data *hx711_data = iio_priv(indio_dev);
 	struct hx711_data *hx711_data = iio_priv(indio_dev);
-	int ret;
 
 
 	switch (mask) {
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
 	case IIO_CHAN_INFO_RAW:
 		mutex_lock(&hx711_data->lock);
 		mutex_lock(&hx711_data->lock);
 
 
-		/*
-		 * hx711_reset() must be called from here
-		 * because it could be calling hx711_read() by itself
-		 */
-		if (hx711_reset(hx711_data)) {
-			mutex_unlock(&hx711_data->lock);
-			dev_err(hx711_data->dev, "reset failed!");
-			return -EIO;
-		}
-
-		ret = hx711_set_gain_for_channel(hx711_data, chan->channel);
-		if (ret < 0) {
-			mutex_unlock(&hx711_data->lock);
-			return ret;
-		}
-
-		*val = hx711_read(hx711_data);
+		*val = hx711_reset_read(hx711_data, chan->channel);
 
 
 		mutex_unlock(&hx711_data->lock);
 		mutex_unlock(&hx711_data->lock);
 
 
@@ -339,6 +352,36 @@ static int hx711_write_raw_get_fmt(struct iio_dev *indio_dev,
 	return IIO_VAL_INT_PLUS_NANO;
 	return IIO_VAL_INT_PLUS_NANO;
 }
 }
 
 
+static irqreturn_t hx711_trigger(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct hx711_data *hx711_data = iio_priv(indio_dev);
+	int i, j = 0;
+
+	mutex_lock(&hx711_data->lock);
+
+	memset(hx711_data->buffer, 0, sizeof(hx711_data->buffer));
+
+	for (i = 0; i < indio_dev->masklength; i++) {
+		if (!test_bit(i, indio_dev->active_scan_mask))
+			continue;
+
+		hx711_data->buffer[j] = hx711_reset_read(hx711_data,
+					indio_dev->channels[i].channel);
+		j++;
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, hx711_data->buffer,
+							pf->timestamp);
+
+	mutex_unlock(&hx711_data->lock);
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
 static ssize_t hx711_scale_available_show(struct device *dev,
 static ssize_t hx711_scale_available_show(struct device *dev,
 				struct device_attribute *attr,
 				struct device_attribute *attr,
 				char *buf)
 				char *buf)
@@ -387,6 +430,13 @@ static const struct iio_chan_spec hx711_chan_spec[] = {
 		.indexed = 1,
 		.indexed = 1,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 24,
+			.storagebits = 32,
+			.endianness = IIO_CPU,
+		},
 	},
 	},
 	{
 	{
 		.type = IIO_VOLTAGE,
 		.type = IIO_VOLTAGE,
@@ -394,7 +444,15 @@ static const struct iio_chan_spec hx711_chan_spec[] = {
 		.indexed = 1,
 		.indexed = 1,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 1,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 24,
+			.storagebits = 32,
+			.endianness = IIO_CPU,
+		},
 	},
 	},
+	IIO_CHAN_SOFT_TIMESTAMP(2),
 };
 };
 
 
 static int hx711_probe(struct platform_device *pdev)
 static int hx711_probe(struct platform_device *pdev)
@@ -459,10 +517,9 @@ static int hx711_probe(struct platform_device *pdev)
 	 * 1 LSB = (AVDD * 100) / GAIN / 1678 [10^-9 mV]
 	 * 1 LSB = (AVDD * 100) / GAIN / 1678 [10^-9 mV]
 	 */
 	 */
 	ret = regulator_get_voltage(hx711_data->reg_avdd);
 	ret = regulator_get_voltage(hx711_data->reg_avdd);
-	if (ret < 0) {
-		regulator_disable(hx711_data->reg_avdd);
-		return ret;
-	}
+	if (ret < 0)
+		goto error_regulator;
+
 	/* we need 10^-9 mV */
 	/* we need 10^-9 mV */
 	ret *= 100;
 	ret *= 100;
 
 
@@ -482,12 +539,27 @@ static int hx711_probe(struct platform_device *pdev)
 	indio_dev->channels = hx711_chan_spec;
 	indio_dev->channels = hx711_chan_spec;
 	indio_dev->num_channels = ARRAY_SIZE(hx711_chan_spec);
 	indio_dev->num_channels = ARRAY_SIZE(hx711_chan_spec);
 
 
+	ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+							hx711_trigger, NULL);
+	if (ret < 0) {
+		dev_err(dev, "setup of iio triggered buffer failed\n");
+		goto error_regulator;
+	}
+
 	ret = iio_device_register(indio_dev);
 	ret = iio_device_register(indio_dev);
 	if (ret < 0) {
 	if (ret < 0) {
 		dev_err(dev, "Couldn't register the device\n");
 		dev_err(dev, "Couldn't register the device\n");
-		regulator_disable(hx711_data->reg_avdd);
+		goto error_buffer;
 	}
 	}
 
 
+	return 0;
+
+error_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+error_regulator:
+	regulator_disable(hx711_data->reg_avdd);
+
 	return ret;
 	return ret;
 }
 }
 
 
@@ -501,6 +573,8 @@ static int hx711_remove(struct platform_device *pdev)
 
 
 	iio_device_unregister(indio_dev);
 	iio_device_unregister(indio_dev);
 
 
+	iio_triggered_buffer_cleanup(indio_dev);
+
 	regulator_disable(hx711_data->reg_avdd);
 	regulator_disable(hx711_data->reg_avdd);
 
 
 	return 0;
 	return 0;

+ 226 - 91
drivers/iio/adc/ina2xx-adc.c

@@ -44,13 +44,14 @@
 
 
 #define INA226_MASK_ENABLE		0x06
 #define INA226_MASK_ENABLE		0x06
 #define INA226_CVRF			BIT(3)
 #define INA226_CVRF			BIT(3)
-#define INA219_CNVR			BIT(1)
 
 
 #define INA2XX_MAX_REGISTERS            8
 #define INA2XX_MAX_REGISTERS            8
 
 
 /* settings - depend on use case */
 /* settings - depend on use case */
-#define INA219_CONFIG_DEFAULT           0x399F	/* PGA=8 */
+#define INA219_CONFIG_DEFAULT           0x399F	/* PGA=1/8, BRNG=32V */
 #define INA219_DEFAULT_IT		532
 #define INA219_DEFAULT_IT		532
+#define INA219_DEFAULT_BRNG             1   /* 32V */
+#define INA219_DEFAULT_PGA              125 /* 1000/8 */
 #define INA226_CONFIG_DEFAULT           0x4327
 #define INA226_CONFIG_DEFAULT           0x4327
 #define INA226_DEFAULT_AVG              4
 #define INA226_DEFAULT_AVG              4
 #define INA226_DEFAULT_IT		1110
 #define INA226_DEFAULT_IT		1110
@@ -63,6 +64,14 @@
  */
  */
 #define INA2XX_MODE_MASK	GENMASK(3, 0)
 #define INA2XX_MODE_MASK	GENMASK(3, 0)
 
 
+/* Gain for VShunt: 1/8 (default), 1/4, 1/2, 1 */
+#define INA219_PGA_MASK		GENMASK(12, 11)
+#define INA219_SHIFT_PGA(val)	((val) << 11)
+
+/* VBus range: 32V (default), 16V */
+#define INA219_BRNG_MASK	BIT(13)
+#define INA219_SHIFT_BRNG(val)	((val) << 13)
+
 /* Averaging for VBus/VShunt/Power */
 /* Averaging for VBus/VShunt/Power */
 #define INA226_AVG_MASK		GENMASK(11, 9)
 #define INA226_AVG_MASK		GENMASK(11, 9)
 #define INA226_SHIFT_AVG(val)	((val) << 9)
 #define INA226_SHIFT_AVG(val)	((val) << 9)
@@ -79,6 +88,11 @@
 #define INA226_ITS_MASK		GENMASK(5, 3)
 #define INA226_ITS_MASK		GENMASK(5, 3)
 #define INA226_SHIFT_ITS(val)	((val) << 3)
 #define INA226_SHIFT_ITS(val)	((val) << 3)
 
 
+/* INA219 Bus voltage register, low bits are flags */
+#define INA219_OVF		BIT(0)
+#define INA219_CNVR		BIT(1)
+#define INA219_BUS_VOLTAGE_SHIFT	3
+
 /* Cosmetic macro giving the sampling period for a full P=UxI cycle */
 /* Cosmetic macro giving the sampling period for a full P=UxI cycle */
 #define SAMPLING_PERIOD(c)	((c->int_time_vbus + c->int_time_vshunt) \
 #define SAMPLING_PERIOD(c)	((c->int_time_vbus + c->int_time_vshunt) \
 				 * c->avg)
 				 * c->avg)
@@ -110,11 +124,12 @@ enum ina2xx_ids { ina219, ina226 };
 
 
 struct ina2xx_config {
 struct ina2xx_config {
 	u16 config_default;
 	u16 config_default;
-	int calibration_factor;
-	int shunt_div;
-	int bus_voltage_shift;
+	int calibration_value;
+	int shunt_voltage_lsb;	/* nV */
+	int bus_voltage_shift;	/* position of lsb */
 	int bus_voltage_lsb;	/* uV */
 	int bus_voltage_lsb;	/* uV */
-	int power_lsb;		/* uW */
+	/* fixed relation between current and power lsb, uW/uA */
+	int power_lsb_factor;
 	enum ina2xx_ids chip_id;
 	enum ina2xx_ids chip_id;
 };
 };
 
 
@@ -127,26 +142,28 @@ struct ina2xx_chip_info {
 	int avg;
 	int avg;
 	int int_time_vbus; /* Bus voltage integration time uS */
 	int int_time_vbus; /* Bus voltage integration time uS */
 	int int_time_vshunt; /* Shunt voltage integration time uS */
 	int int_time_vshunt; /* Shunt voltage integration time uS */
+	int range_vbus; /* Bus voltage maximum in V */
+	int pga_gain_vshunt; /* Shunt voltage PGA gain */
 	bool allow_async_readout;
 	bool allow_async_readout;
 };
 };
 
 
 static const struct ina2xx_config ina2xx_config[] = {
 static const struct ina2xx_config ina2xx_config[] = {
 	[ina219] = {
 	[ina219] = {
 		.config_default = INA219_CONFIG_DEFAULT,
 		.config_default = INA219_CONFIG_DEFAULT,
-		.calibration_factor = 40960000,
-		.shunt_div = 100,
-		.bus_voltage_shift = 3,
+		.calibration_value = 4096,
+		.shunt_voltage_lsb = 10000,
+		.bus_voltage_shift = INA219_BUS_VOLTAGE_SHIFT,
 		.bus_voltage_lsb = 4000,
 		.bus_voltage_lsb = 4000,
-		.power_lsb = 20000,
+		.power_lsb_factor = 20,
 		.chip_id = ina219,
 		.chip_id = ina219,
 	},
 	},
 	[ina226] = {
 	[ina226] = {
 		.config_default = INA226_CONFIG_DEFAULT,
 		.config_default = INA226_CONFIG_DEFAULT,
-		.calibration_factor = 5120000,
-		.shunt_div = 400,
+		.calibration_value = 2048,
+		.shunt_voltage_lsb = 2500,
 		.bus_voltage_shift = 0,
 		.bus_voltage_shift = 0,
 		.bus_voltage_lsb = 1250,
 		.bus_voltage_lsb = 1250,
-		.power_lsb = 25000,
+		.power_lsb_factor = 25,
 		.chip_id = ina226,
 		.chip_id = ina226,
 	},
 	},
 };
 };
@@ -170,6 +187,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
 		else
 		else
 			*val  = regval;
 			*val  = regval;
 
 
+		if (chan->address == INA2XX_BUS_VOLTAGE)
+			*val >>= chip->config->bus_voltage_shift;
+
 		return IIO_VAL_INT;
 		return IIO_VAL_INT;
 
 
 	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
 	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
@@ -197,26 +217,48 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
 	case IIO_CHAN_INFO_SCALE:
 	case IIO_CHAN_INFO_SCALE:
 		switch (chan->address) {
 		switch (chan->address) {
 		case INA2XX_SHUNT_VOLTAGE:
 		case INA2XX_SHUNT_VOLTAGE:
-			/* processed (mV) = raw/shunt_div */
-			*val2 = chip->config->shunt_div;
-			*val = 1;
+			/* processed (mV) = raw * lsb(nV) / 1000000 */
+			*val = chip->config->shunt_voltage_lsb;
+			*val2 = 1000000;
 			return IIO_VAL_FRACTIONAL;
 			return IIO_VAL_FRACTIONAL;
 
 
 		case INA2XX_BUS_VOLTAGE:
 		case INA2XX_BUS_VOLTAGE:
-			/* processed (mV) = raw*lsb (uV) / (1000 << shift) */
+			/* processed (mV) = raw * lsb (uV) / 1000 */
 			*val = chip->config->bus_voltage_lsb;
 			*val = chip->config->bus_voltage_lsb;
-			*val2 = 1000 << chip->config->bus_voltage_shift;
+			*val2 = 1000;
+			return IIO_VAL_FRACTIONAL;
+
+		case INA2XX_CURRENT:
+			/*
+			 * processed (mA) = raw * current_lsb (mA)
+			 * current_lsb (mA) = shunt_voltage_lsb (nV) /
+			 *                    shunt_resistor (uOhm)
+			 */
+			*val = chip->config->shunt_voltage_lsb;
+			*val2 = chip->shunt_resistor_uohm;
 			return IIO_VAL_FRACTIONAL;
 			return IIO_VAL_FRACTIONAL;
 
 
 		case INA2XX_POWER:
 		case INA2XX_POWER:
-			/* processed (mW) = raw*lsb (uW) / 1000 */
-			*val = chip->config->power_lsb;
+			/*
+			 * processed (mW) = raw * power_lsb (mW)
+			 * power_lsb (mW) = power_lsb_factor (mW/mA) *
+			 *                  current_lsb (mA)
+			 */
+			*val = chip->config->power_lsb_factor *
+			       chip->config->shunt_voltage_lsb;
+			*val2 = chip->shunt_resistor_uohm;
+			return IIO_VAL_FRACTIONAL;
+		}
+
+	case IIO_CHAN_INFO_HARDWAREGAIN:
+		switch (chan->address) {
+		case INA2XX_SHUNT_VOLTAGE:
+			*val = chip->pga_gain_vshunt;
 			*val2 = 1000;
 			*val2 = 1000;
 			return IIO_VAL_FRACTIONAL;
 			return IIO_VAL_FRACTIONAL;
 
 
-		case INA2XX_CURRENT:
-			/* processed (mA) = raw (mA) */
-			*val = 1;
+		case INA2XX_BUS_VOLTAGE:
+			*val = chip->range_vbus == 32 ? 1 : 2;
 			return IIO_VAL_INT;
 			return IIO_VAL_INT;
 		}
 		}
 	}
 	}
@@ -353,6 +395,74 @@ static int ina219_set_int_time_vshunt(struct ina2xx_chip_info *chip,
 	return 0;
 	return 0;
 }
 }
 
 
+static const int ina219_vbus_range_tab[] = { 1, 2 };
+static int ina219_set_vbus_range_denom(struct ina2xx_chip_info *chip,
+				       unsigned int range,
+				       unsigned int *config)
+{
+	if (range == 1)
+		chip->range_vbus = 32;
+	else if (range == 2)
+		chip->range_vbus = 16;
+	else
+		return -EINVAL;
+
+	*config &= ~INA219_BRNG_MASK;
+	*config |= INA219_SHIFT_BRNG(range == 1 ? 1 : 0) & INA219_BRNG_MASK;
+
+	return 0;
+}
+
+static const int ina219_vshunt_gain_tab[] = { 125, 250, 500, 1000 };
+static const int ina219_vshunt_gain_frac[] = {
+	125, 1000, 250, 1000, 500, 1000, 1000, 1000 };
+
+static int ina219_set_vshunt_pga_gain(struct ina2xx_chip_info *chip,
+				      unsigned int gain,
+				      unsigned int *config)
+{
+	int bits;
+
+	if (gain < 125 || gain > 1000)
+		return -EINVAL;
+
+	bits = find_closest(gain, ina219_vshunt_gain_tab,
+			    ARRAY_SIZE(ina219_vshunt_gain_tab));
+
+	chip->pga_gain_vshunt = ina219_vshunt_gain_tab[bits];
+	bits = 3 - bits;
+
+	*config &= ~INA219_PGA_MASK;
+	*config |= INA219_SHIFT_PGA(bits) & INA219_PGA_MASK;
+
+	return 0;
+}
+
+static int ina2xx_read_avail(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     const int **vals, int *type, int *length,
+			     long mask)
+{
+	switch (mask) {
+	case IIO_CHAN_INFO_HARDWAREGAIN:
+		switch (chan->address) {
+		case INA2XX_SHUNT_VOLTAGE:
+			*type = IIO_VAL_FRACTIONAL;
+			*length = sizeof(ina219_vshunt_gain_frac) / sizeof(int);
+			*vals = ina219_vshunt_gain_frac;
+			return IIO_AVAIL_LIST;
+
+		case INA2XX_BUS_VOLTAGE:
+			*type = IIO_VAL_INT;
+			*length = sizeof(ina219_vbus_range_tab) / sizeof(int);
+			*vals = ina219_vbus_range_tab;
+			return IIO_AVAIL_LIST;
+		}
+	}
+
+	return -EINVAL;
+}
+
 static int ina2xx_write_raw(struct iio_dev *indio_dev,
 static int ina2xx_write_raw(struct iio_dev *indio_dev,
 			    struct iio_chan_spec const *chan,
 			    struct iio_chan_spec const *chan,
 			    int val, int val2, long mask)
 			    int val, int val2, long mask)
@@ -395,6 +505,14 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
 		}
 		}
 		break;
 		break;
 
 
+	case IIO_CHAN_INFO_HARDWAREGAIN:
+		if (chan->address == INA2XX_SHUNT_VOLTAGE)
+			ret = ina219_set_vshunt_pga_gain(chip, val * 1000 +
+							 val2 / 1000, &tmp);
+		else
+			ret = ina219_set_vbus_range_denom(chip, val, &tmp);
+		break;
+
 	default:
 	default:
 		ret = -EINVAL;
 		ret = -EINVAL;
 	}
 	}
@@ -434,25 +552,21 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
 }
 }
 
 
 /*
 /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet). We hardcode a Current_LSB
- * of 1.0 x10-3. The only remaining parameter is RShunt.
- * There is no need to expose the CALIBRATION register
- * to the user for now. But we need to reset this register
- * if the user updates RShunt after driver init, e.g upon
- * reading an EEPROM/Probe-type value.
+ * Calibration register is set to the best value, which eliminates
+ * truncation errors on calculating current register in hardware.
+ * According to datasheet (INA 226: eq. 3, INA219: eq. 4) the best values
+ * are 2048 for ina226 and 4096 for ina219. They are hardcoded as
+ * calibration_value.
  */
  */
 static int ina2xx_set_calibration(struct ina2xx_chip_info *chip)
 static int ina2xx_set_calibration(struct ina2xx_chip_info *chip)
 {
 {
-	u16 regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
-				   chip->shunt_resistor_uohm);
-
-	return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
+	return regmap_write(chip->regmap, INA2XX_CALIBRATION,
+			    chip->config->calibration_value);
 }
 }
 
 
 static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
 static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
 {
 {
-	if (val <= 0 || val > chip->config->calibration_factor)
+	if (val == 0 || val > INT_MAX)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	chip->shunt_resistor_uohm = val;
 	chip->shunt_resistor_uohm = val;
@@ -485,11 +599,6 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	/* Update the Calibration register */
-	ret = ina2xx_set_calibration(chip);
-	if (ret)
-		return ret;
-
 	return len;
 	return len;
 }
 }
 
 
@@ -532,19 +641,23 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
  * Sampling Freq is a consequence of the integration times of
  * Sampling Freq is a consequence of the integration times of
  * the Voltage channels.
  * the Voltage channels.
  */
  */
-#define INA219_CHAN_VOLTAGE(_index, _address) { \
+#define INA219_CHAN_VOLTAGE(_index, _address, _shift) { \
 	.type = IIO_VOLTAGE, \
 	.type = IIO_VOLTAGE, \
 	.address = (_address), \
 	.address = (_address), \
 	.indexed = 1, \
 	.indexed = 1, \
 	.channel = (_index), \
 	.channel = (_index), \
 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
 			      BIT(IIO_CHAN_INFO_SCALE) | \
 			      BIT(IIO_CHAN_INFO_SCALE) | \
-			      BIT(IIO_CHAN_INFO_INT_TIME), \
+			      BIT(IIO_CHAN_INFO_INT_TIME) | \
+			      BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+	.info_mask_separate_available = \
+			      BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
 	.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
 	.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
 	.scan_index = (_index), \
 	.scan_index = (_index), \
 	.scan_type = { \
 	.scan_type = { \
 		.sign = 'u', \
 		.sign = 'u', \
-		.realbits = 16, \
+		.shift = _shift, \
+		.realbits = 16 - _shift, \
 		.storagebits = 16, \
 		.storagebits = 16, \
 		.endianness = IIO_LE, \
 		.endianness = IIO_LE, \
 	} \
 	} \
@@ -579,23 +692,18 @@ static const struct iio_chan_spec ina226_channels[] = {
 };
 };
 
 
 static const struct iio_chan_spec ina219_channels[] = {
 static const struct iio_chan_spec ina219_channels[] = {
-	INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE),
-	INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE),
+	INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE, 0),
+	INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE, INA219_BUS_VOLTAGE_SHIFT),
 	INA219_CHAN(IIO_POWER, 2, INA2XX_POWER),
 	INA219_CHAN(IIO_POWER, 2, INA2XX_POWER),
 	INA219_CHAN(IIO_CURRENT, 3, INA2XX_CURRENT),
 	INA219_CHAN(IIO_CURRENT, 3, INA2XX_CURRENT),
 	IIO_CHAN_SOFT_TIMESTAMP(4),
 	IIO_CHAN_SOFT_TIMESTAMP(4),
 };
 };
 
 
-static int ina2xx_work_buffer(struct iio_dev *indio_dev)
+static int ina2xx_conversion_ready(struct iio_dev *indio_dev)
 {
 {
 	struct ina2xx_chip_info *chip = iio_priv(indio_dev);
 	struct ina2xx_chip_info *chip = iio_priv(indio_dev);
-	unsigned short data[8];
-	int bit, ret, i = 0;
-	s64 time_a, time_b;
+	int ret;
 	unsigned int alert;
 	unsigned int alert;
-	int cnvr_need_clear = 0;
-
-	time_a = iio_get_time_ns(indio_dev);
 
 
 	/*
 	/*
 	 * Because the timer thread and the chip conversion clock
 	 * Because the timer thread and the chip conversion clock
@@ -608,23 +716,31 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
 	 * For now, we do an extra read of the MASK_ENABLE register (INA226)
 	 * For now, we do an extra read of the MASK_ENABLE register (INA226)
 	 * resp. the BUS_VOLTAGE register (INA219).
 	 * resp. the BUS_VOLTAGE register (INA219).
 	 */
 	 */
-	if (!chip->allow_async_readout)
-		do {
-			if (chip->config->chip_id == ina226) {
-				ret = regmap_read(chip->regmap,
-						  INA226_MASK_ENABLE, &alert);
-				alert &= INA226_CVRF;
-			} else {
-				ret = regmap_read(chip->regmap,
-						  INA2XX_BUS_VOLTAGE, &alert);
-				alert &= INA219_CNVR;
-				cnvr_need_clear = alert;
-			}
+	if (chip->config->chip_id == ina226) {
+		ret = regmap_read(chip->regmap,
+				  INA226_MASK_ENABLE, &alert);
+		alert &= INA226_CVRF;
+	} else {
+		ret = regmap_read(chip->regmap,
+				  INA2XX_BUS_VOLTAGE, &alert);
+		alert &= INA219_CNVR;
+	}
 
 
-			if (ret < 0)
-				return ret;
+	if (ret < 0)
+		return ret;
+
+	return !!alert;
+}
+
+static int ina2xx_work_buffer(struct iio_dev *indio_dev)
+{
+	struct ina2xx_chip_info *chip = iio_priv(indio_dev);
+	/* data buffer needs space for channel data and timestap */
+	unsigned short data[4 + sizeof(s64)/sizeof(short)];
+	int bit, ret, i = 0;
+	s64 time;
 
 
-		} while (!alert);
+	time = iio_get_time_ns(indio_dev);
 
 
 	/*
 	/*
 	 * Single register reads: bulk_read will not work with ina226/219
 	 * Single register reads: bulk_read will not work with ina226/219
@@ -640,26 +756,11 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
 			return ret;
 			return ret;
 
 
 		data[i++] = val;
 		data[i++] = val;
-
-		if (INA2XX_SHUNT_VOLTAGE + bit == INA2XX_POWER)
-			cnvr_need_clear = 0;
-	}
-
-	/* Dummy read on INA219 power register to clear CNVR flag */
-	if (cnvr_need_clear && chip->config->chip_id == ina219) {
-		unsigned int val;
-
-		ret = regmap_read(chip->regmap, INA2XX_POWER, &val);
-		if (ret < 0)
-			return ret;
 	}
 	}
 
 
-	time_b = iio_get_time_ns(indio_dev);
+	iio_push_to_buffers_with_timestamp(indio_dev, data, time);
 
 
-	iio_push_to_buffers_with_timestamp(indio_dev,
-					   (unsigned int *)data, time_a);
-
-	return (unsigned long)(time_b - time_a) / 1000;
+	return 0;
 };
 };
 
 
 static int ina2xx_capture_thread(void *data)
 static int ina2xx_capture_thread(void *data)
@@ -667,7 +768,9 @@ static int ina2xx_capture_thread(void *data)
 	struct iio_dev *indio_dev = data;
 	struct iio_dev *indio_dev = data;
 	struct ina2xx_chip_info *chip = iio_priv(indio_dev);
 	struct ina2xx_chip_info *chip = iio_priv(indio_dev);
 	int sampling_us = SAMPLING_PERIOD(chip);
 	int sampling_us = SAMPLING_PERIOD(chip);
-	int buffer_us;
+	int ret;
+	struct timespec64 next, now, delta;
+	s64 delay_us;
 
 
 	/*
 	/*
 	 * Poll a bit faster than the chip internal Fs, in case
 	 * Poll a bit faster than the chip internal Fs, in case
@@ -676,13 +779,43 @@ static int ina2xx_capture_thread(void *data)
 	if (!chip->allow_async_readout)
 	if (!chip->allow_async_readout)
 		sampling_us -= 200;
 		sampling_us -= 200;
 
 
+	ktime_get_ts64(&next);
+
 	do {
 	do {
-		buffer_us = ina2xx_work_buffer(indio_dev);
-		if (buffer_us < 0)
-			return buffer_us;
+		while (!chip->allow_async_readout) {
+			ret = ina2xx_conversion_ready(indio_dev);
+			if (ret < 0)
+				return ret;
 
 
-		if (sampling_us > buffer_us)
-			udelay(sampling_us - buffer_us);
+			/*
+			 * If the conversion was not yet finished,
+			 * reset the reference timestamp.
+			 */
+			if (ret == 0)
+				ktime_get_ts64(&next);
+			else
+				break;
+		}
+
+		ret = ina2xx_work_buffer(indio_dev);
+		if (ret < 0)
+			return ret;
+
+		ktime_get_ts64(&now);
+
+		/*
+		 * Advance the timestamp for the next poll by one sampling
+		 * interval, and sleep for the remainder (next - now)
+		 * In case "next" has already passed, the interval is added
+		 * multiple times, i.e. samples are dropped.
+		 */
+		do {
+			timespec64_add_ns(&next, 1000 * sampling_us);
+			delta = timespec64_sub(next, now);
+			delay_us = div_s64(timespec64_to_ns(&delta), 1000);
+		} while (delay_us <= 0);
+
+		usleep_range(delay_us, (delay_us * 3) >> 1);
 
 
 	} while (!kthread_should_stop());
 	} while (!kthread_should_stop());
 
 
@@ -746,7 +879,6 @@ static IIO_CONST_ATTR_NAMED(ina226_integration_time_available,
 			    integration_time_available,
 			    integration_time_available,
 			    "0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
 			    "0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
 
 
-
 static IIO_DEVICE_ATTR(in_allow_async_readout, S_IRUGO | S_IWUSR,
 static IIO_DEVICE_ATTR(in_allow_async_readout, S_IRUGO | S_IWUSR,
 		       ina2xx_allow_async_readout_show,
 		       ina2xx_allow_async_readout_show,
 		       ina2xx_allow_async_readout_store, 0);
 		       ina2xx_allow_async_readout_store, 0);
@@ -780,6 +912,7 @@ static const struct attribute_group ina226_attribute_group = {
 static const struct iio_info ina219_info = {
 static const struct iio_info ina219_info = {
 	.attrs = &ina219_attribute_group,
 	.attrs = &ina219_attribute_group,
 	.read_raw = ina2xx_read_raw,
 	.read_raw = ina2xx_read_raw,
+	.read_avail = ina2xx_read_avail,
 	.write_raw = ina2xx_write_raw,
 	.write_raw = ina2xx_write_raw,
 	.debugfs_reg_access = ina2xx_debug_reg,
 	.debugfs_reg_access = ina2xx_debug_reg,
 };
 };
@@ -860,6 +993,8 @@ static int ina2xx_probe(struct i2c_client *client,
 		chip->avg = 1;
 		chip->avg = 1;
 		ina219_set_int_time_vbus(chip, INA219_DEFAULT_IT, &val);
 		ina219_set_int_time_vbus(chip, INA219_DEFAULT_IT, &val);
 		ina219_set_int_time_vshunt(chip, INA219_DEFAULT_IT, &val);
 		ina219_set_int_time_vshunt(chip, INA219_DEFAULT_IT, &val);
+		ina219_set_vbus_range_denom(chip, INA219_DEFAULT_BRNG, &val);
+		ina219_set_vshunt_pga_gain(chip, INA219_DEFAULT_PGA, &val);
 	}
 	}
 
 
 	ret = ina2xx_init(chip, val);
 	ret = ina2xx_init(chip, val);

+ 36 - 24
drivers/iio/adc/meson_saradc.c

@@ -96,8 +96,8 @@
 	#define MESON_SAR_ADC_FIFO_RD_SAMPLE_VALUE_MASK		GENMASK(11, 0)
 	#define MESON_SAR_ADC_FIFO_RD_SAMPLE_VALUE_MASK		GENMASK(11, 0)
 
 
 #define MESON_SAR_ADC_AUX_SW					0x1c
 #define MESON_SAR_ADC_AUX_SW					0x1c
-	#define MESON_SAR_ADC_AUX_SW_MUX_SEL_CHAN_MASK(_chan)	\
-					(GENMASK(10, 8) << (((_chan) - 2) * 2))
+	#define MESON_SAR_ADC_AUX_SW_MUX_SEL_CHAN_SHIFT(_chan)	\
+					(8 + (((_chan) - 2) * 3))
 	#define MESON_SAR_ADC_AUX_SW_VREF_P_MUX			BIT(6)
 	#define MESON_SAR_ADC_AUX_SW_VREF_P_MUX			BIT(6)
 	#define MESON_SAR_ADC_AUX_SW_VREF_N_MUX			BIT(5)
 	#define MESON_SAR_ADC_AUX_SW_VREF_N_MUX			BIT(5)
 	#define MESON_SAR_ADC_AUX_SW_MODE_SEL			BIT(4)
 	#define MESON_SAR_ADC_AUX_SW_MODE_SEL			BIT(4)
@@ -221,6 +221,7 @@ enum meson_sar_adc_chan7_mux_sel {
 
 
 struct meson_sar_adc_data {
 struct meson_sar_adc_data {
 	bool					has_bl30_integration;
 	bool					has_bl30_integration;
+	unsigned long				clock_rate;
 	u32					bandgap_reg;
 	u32					bandgap_reg;
 	unsigned int				resolution;
 	unsigned int				resolution;
 	const char				*name;
 	const char				*name;
@@ -233,7 +234,6 @@ struct meson_sar_adc_priv {
 	const struct meson_sar_adc_data		*data;
 	const struct meson_sar_adc_data		*data;
 	struct clk				*clkin;
 	struct clk				*clkin;
 	struct clk				*core_clk;
 	struct clk				*core_clk;
-	struct clk				*sana_clk;
 	struct clk				*adc_sel_clk;
 	struct clk				*adc_sel_clk;
 	struct clk				*adc_clk;
 	struct clk				*adc_clk;
 	struct clk_gate				clk_gate;
 	struct clk_gate				clk_gate;
@@ -622,7 +622,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
 static int meson_sar_adc_init(struct iio_dev *indio_dev)
 static int meson_sar_adc_init(struct iio_dev *indio_dev)
 {
 {
 	struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
 	struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
-	int regval, ret;
+	int regval, i, ret;
 
 
 	/*
 	/*
 	 * make sure we start at CH7 input since the other muxes are only used
 	 * make sure we start at CH7 input since the other muxes are only used
@@ -677,6 +677,32 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
 			   FIELD_PREP(MESON_SAR_ADC_DELAY_INPUT_DLY_SEL_MASK,
 			   FIELD_PREP(MESON_SAR_ADC_DELAY_INPUT_DLY_SEL_MASK,
 				      1));
 				      1));
 
 
+	/*
+	 * set up the input channel muxes in MESON_SAR_ADC_CHAN_10_SW
+	 * (0 = SAR_ADC_CH0, 1 = SAR_ADC_CH1)
+	 */
+	regval = FIELD_PREP(MESON_SAR_ADC_CHAN_10_SW_CHAN0_MUX_SEL_MASK, 0);
+	regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+			   MESON_SAR_ADC_CHAN_10_SW_CHAN0_MUX_SEL_MASK,
+			   regval);
+	regval = FIELD_PREP(MESON_SAR_ADC_CHAN_10_SW_CHAN1_MUX_SEL_MASK, 1);
+	regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+			   MESON_SAR_ADC_CHAN_10_SW_CHAN1_MUX_SEL_MASK,
+			   regval);
+
+	/*
+	 * set up the input channel muxes in MESON_SAR_ADC_AUX_SW
+	 * (2 = SAR_ADC_CH2, 3 = SAR_ADC_CH3, ...) and enable
+	 * MESON_SAR_ADC_AUX_SW_YP_DRIVE_SW and
+	 * MESON_SAR_ADC_AUX_SW_XP_DRIVE_SW like the vendor driver.
+	 */
+	regval = 0;
+	for (i = 2; i <= 7; i++)
+		regval |= i << MESON_SAR_ADC_AUX_SW_MUX_SEL_CHAN_SHIFT(i);
+	regval |= MESON_SAR_ADC_AUX_SW_YP_DRIVE_SW;
+	regval |= MESON_SAR_ADC_AUX_SW_XP_DRIVE_SW;
+	regmap_write(priv->regmap, MESON_SAR_ADC_AUX_SW, regval);
+
 	ret = clk_set_parent(priv->adc_sel_clk, priv->clkin);
 	ret = clk_set_parent(priv->adc_sel_clk, priv->clkin);
 	if (ret) {
 	if (ret) {
 		dev_err(indio_dev->dev.parent,
 		dev_err(indio_dev->dev.parent,
@@ -684,7 +710,7 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
 		return ret;
 		return ret;
 	}
 	}
 
 
-	ret = clk_set_rate(priv->adc_clk, 1200000);
+	ret = clk_set_rate(priv->adc_clk, priv->data->clock_rate);
 	if (ret) {
 	if (ret) {
 		dev_err(indio_dev->dev.parent,
 		dev_err(indio_dev->dev.parent,
 			"failed to set adc clock rate\n");
 			"failed to set adc clock rate\n");
@@ -731,12 +757,6 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
 		goto err_core_clk;
 		goto err_core_clk;
 	}
 	}
 
 
-	ret = clk_prepare_enable(priv->sana_clk);
-	if (ret) {
-		dev_err(indio_dev->dev.parent, "failed to enable sana clk\n");
-		goto err_sana_clk;
-	}
-
 	regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
 	regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
 	regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
 	regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
 			   MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
 			   MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
@@ -763,8 +783,6 @@ err_adc_clk:
 	regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
 	regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
 			   MESON_SAR_ADC_REG3_ADC_EN, 0);
 			   MESON_SAR_ADC_REG3_ADC_EN, 0);
 	meson_sar_adc_set_bandgap(indio_dev, false);
 	meson_sar_adc_set_bandgap(indio_dev, false);
-	clk_disable_unprepare(priv->sana_clk);
-err_sana_clk:
 	clk_disable_unprepare(priv->core_clk);
 	clk_disable_unprepare(priv->core_clk);
 err_core_clk:
 err_core_clk:
 	regulator_disable(priv->vref);
 	regulator_disable(priv->vref);
@@ -790,7 +808,6 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
 
 
 	meson_sar_adc_set_bandgap(indio_dev, false);
 	meson_sar_adc_set_bandgap(indio_dev, false);
 
 
-	clk_disable_unprepare(priv->sana_clk);
 	clk_disable_unprepare(priv->core_clk);
 	clk_disable_unprepare(priv->core_clk);
 
 
 	regulator_disable(priv->vref);
 	regulator_disable(priv->vref);
@@ -866,6 +883,7 @@ static const struct iio_info meson_sar_adc_iio_info = {
 
 
 static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
 static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
 	.has_bl30_integration = false,
 	.has_bl30_integration = false,
+	.clock_rate = 1150000,
 	.bandgap_reg = MESON_SAR_ADC_DELTA_10,
 	.bandgap_reg = MESON_SAR_ADC_DELTA_10,
 	.regmap_config = &meson_sar_adc_regmap_config_meson8,
 	.regmap_config = &meson_sar_adc_regmap_config_meson8,
 	.resolution = 10,
 	.resolution = 10,
@@ -874,6 +892,7 @@ static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
 
 
 static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
 static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
 	.has_bl30_integration = false,
 	.has_bl30_integration = false,
+	.clock_rate = 1150000,
 	.bandgap_reg = MESON_SAR_ADC_DELTA_10,
 	.bandgap_reg = MESON_SAR_ADC_DELTA_10,
 	.regmap_config = &meson_sar_adc_regmap_config_meson8,
 	.regmap_config = &meson_sar_adc_regmap_config_meson8,
 	.resolution = 10,
 	.resolution = 10,
@@ -882,6 +901,7 @@ static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
 
 
 static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
 static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
 	.has_bl30_integration = true,
 	.has_bl30_integration = true,
+	.clock_rate = 1200000,
 	.bandgap_reg = MESON_SAR_ADC_REG11,
 	.bandgap_reg = MESON_SAR_ADC_REG11,
 	.regmap_config = &meson_sar_adc_regmap_config_gxbb,
 	.regmap_config = &meson_sar_adc_regmap_config_gxbb,
 	.resolution = 10,
 	.resolution = 10,
@@ -890,6 +910,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
 
 
 static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
 static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
 	.has_bl30_integration = true,
 	.has_bl30_integration = true,
+	.clock_rate = 1200000,
 	.bandgap_reg = MESON_SAR_ADC_REG11,
 	.bandgap_reg = MESON_SAR_ADC_REG11,
 	.regmap_config = &meson_sar_adc_regmap_config_gxbb,
 	.regmap_config = &meson_sar_adc_regmap_config_gxbb,
 	.resolution = 12,
 	.resolution = 12,
@@ -898,6 +919,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
 
 
 static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
 static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
 	.has_bl30_integration = true,
 	.has_bl30_integration = true,
+	.clock_rate = 1200000,
 	.bandgap_reg = MESON_SAR_ADC_REG11,
 	.bandgap_reg = MESON_SAR_ADC_REG11,
 	.regmap_config = &meson_sar_adc_regmap_config_gxbb,
 	.regmap_config = &meson_sar_adc_regmap_config_gxbb,
 	.resolution = 12,
 	.resolution = 12,
@@ -993,16 +1015,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
 		return PTR_ERR(priv->core_clk);
 		return PTR_ERR(priv->core_clk);
 	}
 	}
 
 
-	priv->sana_clk = devm_clk_get(&pdev->dev, "sana");
-	if (IS_ERR(priv->sana_clk)) {
-		if (PTR_ERR(priv->sana_clk) == -ENOENT) {
-			priv->sana_clk = NULL;
-		} else {
-			dev_err(&pdev->dev, "failed to get sana clk\n");
-			return PTR_ERR(priv->sana_clk);
-		}
-	}
-
 	priv->adc_clk = devm_clk_get(&pdev->dev, "adc_clk");
 	priv->adc_clk = devm_clk_get(&pdev->dev, "adc_clk");
 	if (IS_ERR(priv->adc_clk)) {
 	if (IS_ERR(priv->adc_clk)) {
 		if (PTR_ERR(priv->adc_clk) == -ENOENT) {
 		if (PTR_ERR(priv->adc_clk) == -ENOENT) {

+ 4 - 0
drivers/iio/adc/qcom-vadc-common.c

@@ -5,6 +5,7 @@
 #include <linux/math64.h>
 #include <linux/math64.h>
 #include <linux/log2.h>
 #include <linux/log2.h>
 #include <linux/err.h>
 #include <linux/err.h>
+#include <linux/module.h>
 
 
 #include "qcom-vadc-common.h"
 #include "qcom-vadc-common.h"
 
 
@@ -229,3 +230,6 @@ int qcom_vadc_decimation_from_dt(u32 value)
 	return __ffs64(value / VADC_DECIMATION_MIN);
 	return __ffs64(value / VADC_DECIMATION_MIN);
 }
 }
 EXPORT_SYMBOL(qcom_vadc_decimation_from_dt);
 EXPORT_SYMBOL(qcom_vadc_decimation_from_dt);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm ADC common functionality");

+ 1 - 13
drivers/iio/adc/stm32-adc-core.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * This file is part of STM32 ADC driver
  * This file is part of STM32 ADC driver
  *
  *
@@ -6,19 +7,6 @@
  *
  *
  * Inspired from: fsl-imx25-tsadc
  * Inspired from: fsl-imx25-tsadc
  *
  *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
  */
 
 
 #include <linux/clk.h>
 #include <linux/clk.h>

+ 1 - 13
drivers/iio/adc/stm32-adc-core.h

@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 /*
  * This file is part of STM32 ADC driver
  * This file is part of STM32 ADC driver
  *
  *
  * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
  * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
  *
  *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
  */
 
 
 #ifndef __STM32_ADC_H
 #ifndef __STM32_ADC_H

+ 116 - 83
drivers/iio/adc/stm32-adc.c

@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * This file is part of STM32 ADC driver
  * This file is part of STM32 ADC driver
  *
  *
  * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
  * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
- *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
  */
 
 
 #include <linux/clk.h>
 #include <linux/clk.h>
@@ -92,6 +79,7 @@
 #define STM32H7_ADC_SQR3		0x38
 #define STM32H7_ADC_SQR3		0x38
 #define STM32H7_ADC_SQR4		0x3C
 #define STM32H7_ADC_SQR4		0x3C
 #define STM32H7_ADC_DR			0x40
 #define STM32H7_ADC_DR			0x40
+#define STM32H7_ADC_DIFSEL		0xC0
 #define STM32H7_ADC_CALFACT		0xC4
 #define STM32H7_ADC_CALFACT		0xC4
 #define STM32H7_ADC_CALFACT2		0xC8
 #define STM32H7_ADC_CALFACT2		0xC8
 
 
@@ -153,6 +141,8 @@ enum stm32h7_adc_dmngt {
 /* BOOST bit must be set on STM32H7 when ADC clock is above 20MHz */
 /* BOOST bit must be set on STM32H7 when ADC clock is above 20MHz */
 #define STM32H7_BOOST_CLKRATE		20000000UL
 #define STM32H7_BOOST_CLKRATE		20000000UL
 
 
+#define STM32_ADC_CH_MAX		20	/* max number of channels */
+#define STM32_ADC_CH_SZ			10	/* max channel name size */
 #define STM32_ADC_MAX_SQ		16	/* SQ1..SQ16 */
 #define STM32_ADC_MAX_SQ		16	/* SQ1..SQ16 */
 #define STM32_ADC_MAX_SMP		7	/* SMPx range is [0..7] */
 #define STM32_ADC_MAX_SMP		7	/* SMPx range is [0..7] */
 #define STM32_ADC_TIMEOUT_US		100000
 #define STM32_ADC_TIMEOUT_US		100000
@@ -297,9 +287,11 @@ struct stm32_adc_cfg {
  * @rx_buf:		dma rx buffer cpu address
  * @rx_buf:		dma rx buffer cpu address
  * @rx_dma_buf:		dma rx buffer bus address
  * @rx_dma_buf:		dma rx buffer bus address
  * @rx_buf_sz:		dma rx buffer size
  * @rx_buf_sz:		dma rx buffer size
+ * @difsel		bitmask to set single-ended/differential channel
  * @pcsel		bitmask to preselect channels on some devices
  * @pcsel		bitmask to preselect channels on some devices
  * @smpr_val:		sampling time settings (e.g. smpr1 / smpr2)
  * @smpr_val:		sampling time settings (e.g. smpr1 / smpr2)
  * @cal:		optional calibration data on some devices
  * @cal:		optional calibration data on some devices
+ * @chan_name:		channel name array
  */
  */
 struct stm32_adc {
 struct stm32_adc {
 	struct stm32_adc_common	*common;
 	struct stm32_adc_common	*common;
@@ -318,72 +310,37 @@ struct stm32_adc {
 	u8			*rx_buf;
 	u8			*rx_buf;
 	dma_addr_t		rx_dma_buf;
 	dma_addr_t		rx_dma_buf;
 	unsigned int		rx_buf_sz;
 	unsigned int		rx_buf_sz;
+	u32			difsel;
 	u32			pcsel;
 	u32			pcsel;
 	u32			smpr_val[2];
 	u32			smpr_val[2];
 	struct stm32_adc_calib	cal;
 	struct stm32_adc_calib	cal;
+	char			chan_name[STM32_ADC_CH_MAX][STM32_ADC_CH_SZ];
 };
 };
 
 
-/**
- * struct stm32_adc_chan_spec - specification of stm32 adc channel
- * @type:	IIO channel type
- * @channel:	channel number (single ended)
- * @name:	channel name (single ended)
- */
-struct stm32_adc_chan_spec {
-	enum iio_chan_type	type;
-	int			channel;
-	const char		*name;
+struct stm32_adc_diff_channel {
+	u32 vinp;
+	u32 vinn;
 };
 };
 
 
 /**
 /**
  * struct stm32_adc_info - stm32 ADC, per instance config data
  * struct stm32_adc_info - stm32 ADC, per instance config data
- * @channels:		Reference to stm32 channels spec
  * @max_channels:	Number of channels
  * @max_channels:	Number of channels
  * @resolutions:	available resolutions
  * @resolutions:	available resolutions
  * @num_res:		number of available resolutions
  * @num_res:		number of available resolutions
  */
  */
 struct stm32_adc_info {
 struct stm32_adc_info {
-	const struct stm32_adc_chan_spec *channels;
 	int max_channels;
 	int max_channels;
 	const unsigned int *resolutions;
 	const unsigned int *resolutions;
 	const unsigned int num_res;
 	const unsigned int num_res;
 };
 };
 
 
-/*
- * Input definitions common for all instances:
- * stm32f4 can have up to 16 channels
- * stm32h7 can have up to 20 channels
- */
-static const struct stm32_adc_chan_spec stm32_adc_channels[] = {
-	{ IIO_VOLTAGE, 0, "in0" },
-	{ IIO_VOLTAGE, 1, "in1" },
-	{ IIO_VOLTAGE, 2, "in2" },
-	{ IIO_VOLTAGE, 3, "in3" },
-	{ IIO_VOLTAGE, 4, "in4" },
-	{ IIO_VOLTAGE, 5, "in5" },
-	{ IIO_VOLTAGE, 6, "in6" },
-	{ IIO_VOLTAGE, 7, "in7" },
-	{ IIO_VOLTAGE, 8, "in8" },
-	{ IIO_VOLTAGE, 9, "in9" },
-	{ IIO_VOLTAGE, 10, "in10" },
-	{ IIO_VOLTAGE, 11, "in11" },
-	{ IIO_VOLTAGE, 12, "in12" },
-	{ IIO_VOLTAGE, 13, "in13" },
-	{ IIO_VOLTAGE, 14, "in14" },
-	{ IIO_VOLTAGE, 15, "in15" },
-	{ IIO_VOLTAGE, 16, "in16" },
-	{ IIO_VOLTAGE, 17, "in17" },
-	{ IIO_VOLTAGE, 18, "in18" },
-	{ IIO_VOLTAGE, 19, "in19" },
-};
-
 static const unsigned int stm32f4_adc_resolutions[] = {
 static const unsigned int stm32f4_adc_resolutions[] = {
 	/* sorted values so the index matches RES[1:0] in STM32F4_ADC_CR1 */
 	/* sorted values so the index matches RES[1:0] in STM32F4_ADC_CR1 */
 	12, 10, 8, 6,
 	12, 10, 8, 6,
 };
 };
 
 
+/* stm32f4 can have up to 16 channels */
 static const struct stm32_adc_info stm32f4_adc_info = {
 static const struct stm32_adc_info stm32f4_adc_info = {
-	.channels = stm32_adc_channels,
 	.max_channels = 16,
 	.max_channels = 16,
 	.resolutions = stm32f4_adc_resolutions,
 	.resolutions = stm32f4_adc_resolutions,
 	.num_res = ARRAY_SIZE(stm32f4_adc_resolutions),
 	.num_res = ARRAY_SIZE(stm32f4_adc_resolutions),
@@ -394,9 +351,9 @@ static const unsigned int stm32h7_adc_resolutions[] = {
 	16, 14, 12, 10, 8,
 	16, 14, 12, 10, 8,
 };
 };
 
 
+/* stm32h7 can have up to 20 channels */
 static const struct stm32_adc_info stm32h7_adc_info = {
 static const struct stm32_adc_info stm32h7_adc_info = {
-	.channels = stm32_adc_channels,
-	.max_channels = 20,
+	.max_channels = STM32_ADC_CH_MAX,
 	.resolutions = stm32h7_adc_resolutions,
 	.resolutions = stm32h7_adc_resolutions,
 	.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
 	.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
 };
 };
@@ -983,15 +940,19 @@ pwr_dwn:
  * stm32h7_adc_prepare() - Leave power down mode to enable ADC.
  * stm32h7_adc_prepare() - Leave power down mode to enable ADC.
  * @adc: stm32 adc instance
  * @adc: stm32 adc instance
  * Leave power down mode.
  * Leave power down mode.
+ * Configure channels as single ended or differential before enabling ADC.
  * Enable ADC.
  * Enable ADC.
  * Restore calibration data.
  * Restore calibration data.
- * Pre-select channels that may be used in PCSEL (required by input MUX / IO).
+ * Pre-select channels that may be used in PCSEL (required by input MUX / IO):
+ * - Only one input is selected for single ended (e.g. 'vinp')
+ * - Two inputs are selected for differential channels (e.g. 'vinp' & 'vinn')
  */
  */
 static int stm32h7_adc_prepare(struct stm32_adc *adc)
 static int stm32h7_adc_prepare(struct stm32_adc *adc)
 {
 {
 	int ret;
 	int ret;
 
 
 	stm32h7_adc_exit_pwr_down(adc);
 	stm32h7_adc_exit_pwr_down(adc);
+	stm32_adc_writel(adc, STM32H7_ADC_DIFSEL, adc->difsel);
 
 
 	ret = stm32h7_adc_enable(adc);
 	ret = stm32h7_adc_enable(adc);
 	if (ret)
 	if (ret)
@@ -1263,10 +1224,23 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
 		return ret;
 		return ret;
 
 
 	case IIO_CHAN_INFO_SCALE:
 	case IIO_CHAN_INFO_SCALE:
-		*val = adc->common->vref_mv;
-		*val2 = chan->scan_type.realbits;
+		if (chan->differential) {
+			*val = adc->common->vref_mv * 2;
+			*val2 = chan->scan_type.realbits;
+		} else {
+			*val = adc->common->vref_mv;
+			*val2 = chan->scan_type.realbits;
+		}
 		return IIO_VAL_FRACTIONAL_LOG2;
 		return IIO_VAL_FRACTIONAL_LOG2;
 
 
+	case IIO_CHAN_INFO_OFFSET:
+		if (chan->differential)
+			/* ADC_full_scale / 2 */
+			*val = -((1 << chan->scan_type.realbits) / 2);
+		else
+			*val = 0;
+		return IIO_VAL_INT;
+
 	default:
 	default:
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
@@ -1315,6 +1289,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
 {
 {
 	struct stm32_adc *adc = iio_priv(indio_dev);
 	struct stm32_adc *adc = iio_priv(indio_dev);
 	unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2;
 	unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2;
+	unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE;
 
 
 	/*
 	/*
 	 * dma cyclic transfers are used, buffer is split into two periods.
 	 * dma cyclic transfers are used, buffer is split into two periods.
@@ -1323,7 +1298,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
 	 * - one buffer (period) driver can push with iio_trigger_poll().
 	 * - one buffer (period) driver can push with iio_trigger_poll().
 	 */
 	 */
 	watermark = min(watermark, val * (unsigned)(sizeof(u16)));
 	watermark = min(watermark, val * (unsigned)(sizeof(u16)));
-	adc->rx_buf_sz = watermark * 2;
+	adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -1628,29 +1603,40 @@ static void stm32_adc_smpr_init(struct stm32_adc *adc, int channel, u32 smp_ns)
 }
 }
 
 
 static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
 static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
-				    struct iio_chan_spec *chan,
-				    const struct stm32_adc_chan_spec *channel,
-				    int scan_index, u32 smp)
+				    struct iio_chan_spec *chan, u32 vinp,
+				    u32 vinn, int scan_index, bool differential)
 {
 {
 	struct stm32_adc *adc = iio_priv(indio_dev);
 	struct stm32_adc *adc = iio_priv(indio_dev);
-
-	chan->type = channel->type;
-	chan->channel = channel->channel;
-	chan->datasheet_name = channel->name;
+	char *name = adc->chan_name[vinp];
+
+	chan->type = IIO_VOLTAGE;
+	chan->channel = vinp;
+	if (differential) {
+		chan->differential = 1;
+		chan->channel2 = vinn;
+		snprintf(name, STM32_ADC_CH_SZ, "in%d-in%d", vinp, vinn);
+	} else {
+		snprintf(name, STM32_ADC_CH_SZ, "in%d", vinp);
+	}
+	chan->datasheet_name = name;
 	chan->scan_index = scan_index;
 	chan->scan_index = scan_index;
 	chan->indexed = 1;
 	chan->indexed = 1;
 	chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
 	chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
-	chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+	chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+					 BIT(IIO_CHAN_INFO_OFFSET);
 	chan->scan_type.sign = 'u';
 	chan->scan_type.sign = 'u';
 	chan->scan_type.realbits = adc->cfg->adc_info->resolutions[adc->res];
 	chan->scan_type.realbits = adc->cfg->adc_info->resolutions[adc->res];
 	chan->scan_type.storagebits = 16;
 	chan->scan_type.storagebits = 16;
 	chan->ext_info = stm32_adc_ext_info;
 	chan->ext_info = stm32_adc_ext_info;
 
 
-	/* Prepare sampling time settings */
-	stm32_adc_smpr_init(adc, chan->channel, smp);
-
 	/* pre-build selected channels mask */
 	/* pre-build selected channels mask */
 	adc->pcsel |= BIT(chan->channel);
 	adc->pcsel |= BIT(chan->channel);
+	if (differential) {
+		/* pre-build diff channels mask */
+		adc->difsel |= BIT(chan->channel);
+		/* Also add negative input to pre-selected channels */
+		adc->pcsel |= BIT(chan->channel2);
+	}
 }
 }
 
 
 static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
 static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
@@ -1658,17 +1644,40 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
 	struct device_node *node = indio_dev->dev.of_node;
 	struct device_node *node = indio_dev->dev.of_node;
 	struct stm32_adc *adc = iio_priv(indio_dev);
 	struct stm32_adc *adc = iio_priv(indio_dev);
 	const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
 	const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
+	struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
 	struct property *prop;
 	struct property *prop;
 	const __be32 *cur;
 	const __be32 *cur;
 	struct iio_chan_spec *channels;
 	struct iio_chan_spec *channels;
-	int scan_index = 0, num_channels, ret;
+	int scan_index = 0, num_channels = 0, num_diff = 0, ret, i;
 	u32 val, smp = 0;
 	u32 val, smp = 0;
 
 
-	num_channels = of_property_count_u32_elems(node, "st,adc-channels");
-	if (num_channels < 0 ||
-	    num_channels > adc_info->max_channels) {
+	ret = of_property_count_u32_elems(node, "st,adc-channels");
+	if (ret > adc_info->max_channels) {
 		dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
 		dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
-		return num_channels < 0 ? num_channels : -EINVAL;
+		return -EINVAL;
+	} else if (ret > 0) {
+		num_channels += ret;
+	}
+
+	ret = of_property_count_elems_of_size(node, "st,adc-diff-channels",
+					      sizeof(*diff));
+	if (ret > adc_info->max_channels) {
+		dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
+		return -EINVAL;
+	} else if (ret > 0) {
+		int size = ret * sizeof(*diff) / sizeof(u32);
+
+		num_diff = ret;
+		num_channels += ret;
+		ret = of_property_read_u32_array(node, "st,adc-diff-channels",
+						 (u32 *)diff, size);
+		if (ret)
+			return ret;
+	}
+
+	if (!num_channels) {
+		dev_err(&indio_dev->dev, "No channels configured\n");
+		return -ENODATA;
 	}
 	}
 
 
 	/* Optional sample time is provided either for each, or all channels */
 	/* Optional sample time is provided either for each, or all channels */
@@ -1689,6 +1698,33 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
 
 
+		/* Channel can't be configured both as single-ended & diff */
+		for (i = 0; i < num_diff; i++) {
+			if (val == diff[i].vinp) {
+				dev_err(&indio_dev->dev,
+					"channel %d miss-configured\n",	val);
+				return -EINVAL;
+			}
+		}
+		stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
+					0, scan_index, false);
+		scan_index++;
+	}
+
+	for (i = 0; i < num_diff; i++) {
+		if (diff[i].vinp >= adc_info->max_channels ||
+		    diff[i].vinn >= adc_info->max_channels) {
+			dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
+				diff[i].vinp, diff[i].vinn);
+			return -EINVAL;
+		}
+		stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+					diff[i].vinp, diff[i].vinn, scan_index,
+					true);
+		scan_index++;
+	}
+
+	for (i = 0; i < scan_index; i++) {
 		/*
 		/*
 		 * Using of_property_read_u32_index(), smp value will only be
 		 * Using of_property_read_u32_index(), smp value will only be
 		 * modified if valid u32 value can be decoded. This allows to
 		 * modified if valid u32 value can be decoded. This allows to
@@ -1696,12 +1732,9 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
 		 * value per channel.
 		 * value per channel.
 		 */
 		 */
 		of_property_read_u32_index(node, "st,min-sample-time-nsecs",
 		of_property_read_u32_index(node, "st,min-sample-time-nsecs",
-					   scan_index, &smp);
-
-		stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
-					&adc_info->channels[val],
-					scan_index, smp);
-		scan_index++;
+					   i, &smp);
+		/* Prepare sampling time settings */
+		stm32_adc_smpr_init(adc, channels[i].channel, smp);
 	}
 	}
 
 
 	indio_dev->num_channels = scan_index;
 	indio_dev->num_channels = scan_index;

+ 1 - 1
drivers/iio/adc/ti_am335x_adc.c

@@ -523,7 +523,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 	}
 	}
 	am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
 	am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
 
 
-	if (found == false)
+	if (!found)
 		ret =  -EBUSY;
 		ret =  -EBUSY;
 
 
 err_unlock:
 err_unlock:

+ 3 - 10
drivers/iio/chemical/ccs811.c

@@ -96,7 +96,6 @@ static const struct iio_chan_spec ccs811_channels[] = {
 		.channel2 = IIO_MOD_CO2,
 		.channel2 = IIO_MOD_CO2,
 		.modified = 1,
 		.modified = 1,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
-				      BIT(IIO_CHAN_INFO_OFFSET) |
 				      BIT(IIO_CHAN_INFO_SCALE),
 				      BIT(IIO_CHAN_INFO_SCALE),
 		.scan_index = 0,
 		.scan_index = 0,
 		.scan_type = {
 		.scan_type = {
@@ -255,24 +254,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
 			switch (chan->channel2) {
 			switch (chan->channel2) {
 			case IIO_MOD_CO2:
 			case IIO_MOD_CO2:
 				*val = 0;
 				*val = 0;
-				*val2 = 12834;
+				*val2 = 100;
 				return IIO_VAL_INT_PLUS_MICRO;
 				return IIO_VAL_INT_PLUS_MICRO;
 			case IIO_MOD_VOC:
 			case IIO_MOD_VOC:
 				*val = 0;
 				*val = 0;
-				*val2 = 84246;
-				return IIO_VAL_INT_PLUS_MICRO;
+				*val2 = 100;
+				return IIO_VAL_INT_PLUS_NANO;
 			default:
 			default:
 				return -EINVAL;
 				return -EINVAL;
 			}
 			}
 		default:
 		default:
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
-	case IIO_CHAN_INFO_OFFSET:
-		if (!(chan->type == IIO_CONCENTRATION &&
-		      chan->channel2 == IIO_MOD_CO2))
-			return -EINVAL;
-		*val = -400;
-		return IIO_VAL_INT;
 	default:
 	default:
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}

+ 0 - 2
drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c

@@ -191,7 +191,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
 {
 {
 	struct device *dev = &pdev->dev;
 	struct device *dev = &pdev->dev;
 	struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
 	struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
-	struct cros_ec_device *ec_device;
 	struct iio_dev *indio_dev;
 	struct iio_dev *indio_dev;
 	struct cros_ec_sensors_state *state;
 	struct cros_ec_sensors_state *state;
 	struct iio_chan_spec *channel;
 	struct iio_chan_spec *channel;
@@ -201,7 +200,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
 		dev_warn(&pdev->dev, "No CROS EC device found.\n");
 		dev_warn(&pdev->dev, "No CROS EC device found.\n");
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
-	ec_device = ec_dev->ec_dev;
 
 
 	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
 	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
 	if (!indio_dev)
 	if (!indio_dev)

+ 1 - 1
drivers/iio/common/ssp_sensors/ssp.h

@@ -188,7 +188,7 @@ struct ssp_sensorhub_info {
  */
  */
 struct ssp_data {
 struct ssp_data {
 	struct spi_device *spi;
 	struct spi_device *spi;
-	struct ssp_sensorhub_info *sensorhub_info;
+	const struct ssp_sensorhub_info *sensorhub_info;
 	struct timer_list wdt_timer;
 	struct timer_list wdt_timer;
 	struct work_struct work_wdt;
 	struct work_struct work_wdt;
 	struct delayed_work work_refresh;
 	struct delayed_work work_refresh;

+ 1 - 1
drivers/iio/common/ssp_sensors/ssp_dev.c

@@ -486,7 +486,7 @@ static struct ssp_data *ssp_parse_dt(struct device *dev)
 	if (!match)
 	if (!match)
 		goto err_mcu_reset_gpio;
 		goto err_mcu_reset_gpio;
 
 
-	data->sensorhub_info = (struct ssp_sensorhub_info *)match->data;
+	data->sensorhub_info = match->data;
 
 
 	dev_set_drvdata(dev, data);
 	dev_set_drvdata(dev, data);
 
 

+ 1 - 4
drivers/iio/common/ssp_sensors/ssp_spi.c

@@ -277,12 +277,9 @@ static int ssp_handle_big_data(struct ssp_data *data, char *dataframe, int *idx)
 static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 {
 {
 	int idx, sd;
 	int idx, sd;
-	struct timespec ts;
 	struct ssp_sensor_data *spd;
 	struct ssp_sensor_data *spd;
 	struct iio_dev **indio_devs = data->sensor_devs;
 	struct iio_dev **indio_devs = data->sensor_devs;
 
 
-	getnstimeofday(&ts);
-
 	for (idx = 0; idx < len;) {
 	for (idx = 0; idx < len;) {
 		switch (dataframe[idx++]) {
 		switch (dataframe[idx++]) {
 		case SSP_MSG2AP_INST_BYPASS_DATA:
 		case SSP_MSG2AP_INST_BYPASS_DATA:
@@ -329,7 +326,7 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 	}
 	}
 
 
 	if (data->time_syncing)
 	if (data->time_syncing)
-		data->timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+		data->timestamp = ktime_get_real_ns();
 
 
 	return 0;
 	return 0;
 }
 }

+ 1 - 1
drivers/iio/counter/stm32-lptimer-cnt.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * STM32 Low-Power Timer Encoder and Counter driver
  * STM32 Low-Power Timer Encoder and Counter driver
  *
  *
@@ -7,7 +8,6 @@
  *
  *
  * Inspired by 104-quad-8 and stm32-timer-trigger drivers.
  * Inspired by 104-quad-8 and stm32-timer-trigger drivers.
  *
  *
- * License terms:  GNU General Public License (GPL), version 2
  */
  */
 
 
 #include <linux/bitfield.h>
 #include <linux/bitfield.h>

+ 1 - 1
drivers/iio/dac/mcp4725.c

@@ -476,7 +476,7 @@ static int mcp4725_probe(struct i2c_client *client,
 		goto err_disable_vref_reg;
 		goto err_disable_vref_reg;
 	}
 	}
 	pd = (inbuf[0] >> 1) & 0x3;
 	pd = (inbuf[0] >> 1) & 0x3;
-	data->powerdown = pd > 0 ? true : false;
+	data->powerdown = pd > 0;
 	data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
 	data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
 	data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
 	data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
 	if (data->id == MCP4726)
 	if (data->id == MCP4726)

+ 1 - 13
drivers/iio/dac/stm32-dac-core.c

@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * This file is part of STM32 DAC driver
  * This file is part of STM32 DAC driver
  *
  *
  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
  *
  *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
  */
 
 
 #include <linux/clk.h>
 #include <linux/clk.h>

+ 1 - 14
drivers/iio/dac/stm32-dac-core.h

@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 /*
  * This file is part of STM32 DAC driver
  * This file is part of STM32 DAC driver
  *
  *
  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
- *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
  */
 
 
 #ifndef __STM32_DAC_CORE_H
 #ifndef __STM32_DAC_CORE_H

+ 1 - 14
drivers/iio/dac/stm32-dac.c

@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * This file is part of STM32 DAC driver
  * This file is part of STM32 DAC driver
  *
  *
  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
  * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
  * Authors: Amelie Delaunay <amelie.delaunay@st.com>
  * Authors: Amelie Delaunay <amelie.delaunay@st.com>
  *	    Fabrice Gasnier <fabrice.gasnier@st.com>
  *	    Fabrice Gasnier <fabrice.gasnier@st.com>
- *
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
  */
 
 
 #include <linux/bitfield.h>
 #include <linux/bitfield.h>

+ 1 - 1
drivers/iio/dummy/iio_dummy_evgen.c

@@ -56,7 +56,7 @@ static int iio_dummy_evgen_create(void)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
 	ret = irq_sim_init(&iio_evgen->irq_sim, IIO_EVENTGEN_NO);
 	ret = irq_sim_init(&iio_evgen->irq_sim, IIO_EVENTGEN_NO);
-	if (ret) {
+	if (ret < 0) {
 		kfree(iio_evgen);
 		kfree(iio_evgen);
 		return ret;
 		return ret;
 	}
 	}

+ 9 - 6
drivers/iio/gyro/adis16136.c

@@ -124,7 +124,7 @@ static int adis16136_show_product_id(void *arg, u64 *val)
 
 
 	return 0;
 	return 0;
 }
 }
-DEFINE_SIMPLE_ATTRIBUTE(adis16136_product_id_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16136_product_id_fops,
 	adis16136_show_product_id, NULL, "%llu\n");
 	adis16136_show_product_id, NULL, "%llu\n");
 
 
 static int adis16136_show_flash_count(void *arg, u64 *val)
 static int adis16136_show_flash_count(void *arg, u64 *val)
@@ -142,18 +142,21 @@ static int adis16136_show_flash_count(void *arg, u64 *val)
 
 
 	return 0;
 	return 0;
 }
 }
-DEFINE_SIMPLE_ATTRIBUTE(adis16136_flash_count_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16136_flash_count_fops,
 	adis16136_show_flash_count, NULL, "%lld\n");
 	adis16136_show_flash_count, NULL, "%lld\n");
 
 
 static int adis16136_debugfs_init(struct iio_dev *indio_dev)
 static int adis16136_debugfs_init(struct iio_dev *indio_dev)
 {
 {
 	struct adis16136 *adis16136 = iio_priv(indio_dev);
 	struct adis16136 *adis16136 = iio_priv(indio_dev);
 
 
-	debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
-		adis16136, &adis16136_serial_fops);
-	debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
+	debugfs_create_file_unsafe("serial_number", 0400,
+		indio_dev->debugfs_dentry, adis16136,
+		&adis16136_serial_fops);
+	debugfs_create_file_unsafe("product_id", 0400,
+		indio_dev->debugfs_dentry,
 		adis16136, &adis16136_product_id_fops);
 		adis16136, &adis16136_product_id_fops);
-	debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+	debugfs_create_file_unsafe("flash_count", 0400,
+		indio_dev->debugfs_dentry,
 		adis16136, &adis16136_flash_count_fops);
 		adis16136, &adis16136_flash_count_fops);
 
 
 	return 0;
 	return 0;

+ 0 - 1
drivers/iio/gyro/bmg160_core.c

@@ -27,7 +27,6 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 #include <linux/iio/triggered_buffer.h>
 #include <linux/regmap.h>
 #include <linux/regmap.h>
-#include <linux/delay.h>
 #include "bmg160.h"
 #include "bmg160.h"
 
 
 #define BMG160_IRQ_NAME		"bmg160_event"
 #define BMG160_IRQ_NAME		"bmg160_event"

+ 234 - 74
drivers/iio/health/max30102.c

@@ -3,6 +3,9 @@
  *
  *
  * Copyright (C) 2017 Matt Ranostay <matt@ranostay.consulting>
  * Copyright (C) 2017 Matt Ranostay <matt@ranostay.consulting>
  *
  *
+ * Support for MAX30105 optical particle sensor
+ * Copyright (C) 2017 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ *
  * This program is free software; you can redistribute it and/or modify
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * the Free Software Foundation; either version 2 of the License, or
@@ -13,6 +16,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  * GNU General Public License for more details.
  *
  *
+ * 7-bit I2C chip address: 0x57
  * TODO: proximity power saving feature
  * TODO: proximity power saving feature
  */
  */
 
 
@@ -32,6 +36,18 @@
 
 
 #define MAX30102_REGMAP_NAME	"max30102_regmap"
 #define MAX30102_REGMAP_NAME	"max30102_regmap"
 #define MAX30102_DRV_NAME	"max30102"
 #define MAX30102_DRV_NAME	"max30102"
+#define MAX30102_PART_NUMBER	0x15
+
+enum max30102_chip_id {
+	max30102,
+	max30105,
+};
+
+enum max3012_led_idx {
+	MAX30102_LED_RED,
+	MAX30102_LED_IR,
+	MAX30105_LED_GREEN,
+};
 
 
 #define MAX30102_REG_INT_STATUS			0x00
 #define MAX30102_REG_INT_STATUS			0x00
 #define MAX30102_REG_INT_STATUS_PWR_RDY		BIT(0)
 #define MAX30102_REG_INT_STATUS_PWR_RDY		BIT(0)
@@ -52,7 +68,7 @@
 #define MAX30102_REG_FIFO_OVR_CTR		0x05
 #define MAX30102_REG_FIFO_OVR_CTR		0x05
 #define MAX30102_REG_FIFO_RD_PTR		0x06
 #define MAX30102_REG_FIFO_RD_PTR		0x06
 #define MAX30102_REG_FIFO_DATA			0x07
 #define MAX30102_REG_FIFO_DATA			0x07
-#define MAX30102_REG_FIFO_DATA_ENTRY_LEN	6
+#define MAX30102_REG_FIFO_DATA_BYTES		3
 
 
 #define MAX30102_REG_FIFO_CONFIG		0x08
 #define MAX30102_REG_FIFO_CONFIG		0x08
 #define MAX30102_REG_FIFO_CONFIG_AVG_4SAMPLES	BIT(1)
 #define MAX30102_REG_FIFO_CONFIG_AVG_4SAMPLES	BIT(1)
@@ -60,11 +76,18 @@
 #define MAX30102_REG_FIFO_CONFIG_AFULL		BIT(0)
 #define MAX30102_REG_FIFO_CONFIG_AFULL		BIT(0)
 
 
 #define MAX30102_REG_MODE_CONFIG		0x09
 #define MAX30102_REG_MODE_CONFIG		0x09
-#define MAX30102_REG_MODE_CONFIG_MODE_SPO2_EN	BIT(0)
-#define MAX30102_REG_MODE_CONFIG_MODE_HR_EN	BIT(1)
-#define MAX30102_REG_MODE_CONFIG_MODE_MASK	0x03
+#define MAX30102_REG_MODE_CONFIG_MODE_NONE	0x00
+#define MAX30102_REG_MODE_CONFIG_MODE_HR	0x02 /* red LED */
+#define MAX30102_REG_MODE_CONFIG_MODE_HR_SPO2	0x03 /* red + IR LED */
+#define MAX30102_REG_MODE_CONFIG_MODE_MULTI	0x07 /* multi-LED mode */
+#define MAX30102_REG_MODE_CONFIG_MODE_MASK	GENMASK(2, 0)
 #define MAX30102_REG_MODE_CONFIG_PWR		BIT(7)
 #define MAX30102_REG_MODE_CONFIG_PWR		BIT(7)
 
 
+#define MAX30102_REG_MODE_CONTROL_SLOT21	0x11 /* multi-LED control */
+#define MAX30102_REG_MODE_CONTROL_SLOT43	0x12
+#define MAX30102_REG_MODE_CONTROL_SLOT_MASK	(GENMASK(6, 4) | GENMASK(2, 0))
+#define MAX30102_REG_MODE_CONTROL_SLOT_SHIFT	4
+
 #define MAX30102_REG_SPO2_CONFIG		0x0a
 #define MAX30102_REG_SPO2_CONFIG		0x0a
 #define MAX30102_REG_SPO2_CONFIG_PULSE_411_US	0x03
 #define MAX30102_REG_SPO2_CONFIG_PULSE_411_US	0x03
 #define MAX30102_REG_SPO2_CONFIG_SR_400HZ	0x03
 #define MAX30102_REG_SPO2_CONFIG_SR_400HZ	0x03
@@ -75,6 +98,7 @@
 
 
 #define MAX30102_REG_RED_LED_CONFIG		0x0c
 #define MAX30102_REG_RED_LED_CONFIG		0x0c
 #define MAX30102_REG_IR_LED_CONFIG		0x0d
 #define MAX30102_REG_IR_LED_CONFIG		0x0d
+#define MAX30105_REG_GREEN_LED_CONFIG		0x0e
 
 
 #define MAX30102_REG_TEMP_CONFIG		0x21
 #define MAX30102_REG_TEMP_CONFIG		0x21
 #define MAX30102_REG_TEMP_CONFIG_TEMP_EN	BIT(0)
 #define MAX30102_REG_TEMP_CONFIG_TEMP_EN	BIT(0)
@@ -82,14 +106,18 @@
 #define MAX30102_REG_TEMP_INTEGER		0x1f
 #define MAX30102_REG_TEMP_INTEGER		0x1f
 #define MAX30102_REG_TEMP_FRACTION		0x20
 #define MAX30102_REG_TEMP_FRACTION		0x20
 
 
+#define MAX30102_REG_REV_ID			0xfe
+#define MAX30102_REG_PART_ID			0xff
+
 struct max30102_data {
 struct max30102_data {
 	struct i2c_client *client;
 	struct i2c_client *client;
 	struct iio_dev *indio_dev;
 	struct iio_dev *indio_dev;
 	struct mutex lock;
 	struct mutex lock;
 	struct regmap *regmap;
 	struct regmap *regmap;
+	enum max30102_chip_id chip_id;
 
 
-	u8 buffer[8];
-	__be32 processed_buffer[2]; /* 2 x 18-bit (padded to 32-bits) */
+	u8 buffer[12];
+	__be32 processed_buffer[3]; /* 3 x 18-bit (padded to 32-bits) */
 };
 };
 
 
 static const struct regmap_config max30102_regmap_config = {
 static const struct regmap_config max30102_regmap_config = {
@@ -99,37 +127,47 @@ static const struct regmap_config max30102_regmap_config = {
 	.val_bits = 8,
 	.val_bits = 8,
 };
 };
 
 
-static const unsigned long max30102_scan_masks[] = {0x3, 0};
+static const unsigned long max30102_scan_masks[] = {
+	BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR),
+	0
+};
+
+static const unsigned long max30105_scan_masks[] = {
+	BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR),
+	BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR) |
+		BIT(MAX30105_LED_GREEN),
+	0
+};
+
+#define MAX30102_INTENSITY_CHANNEL(_si, _mod) { \
+		.type = IIO_INTENSITY, \
+		.channel2 = _mod, \
+		.modified = 1, \
+		.scan_index = _si, \
+		.scan_type = { \
+			.sign = 'u', \
+			.shift = 8, \
+			.realbits = 18, \
+			.storagebits = 32, \
+			.endianness = IIO_BE, \
+		}, \
+	}
 
 
 static const struct iio_chan_spec max30102_channels[] = {
 static const struct iio_chan_spec max30102_channels[] = {
+	MAX30102_INTENSITY_CHANNEL(MAX30102_LED_RED, IIO_MOD_LIGHT_RED),
+	MAX30102_INTENSITY_CHANNEL(MAX30102_LED_IR, IIO_MOD_LIGHT_IR),
 	{
 	{
-		.type = IIO_INTENSITY,
-		.channel2 = IIO_MOD_LIGHT_RED,
-		.modified = 1,
-
-		.scan_index = 0,
-		.scan_type = {
-			.sign = 'u',
-			.shift = 8,
-			.realbits = 18,
-			.storagebits = 32,
-			.endianness = IIO_BE,
-		},
-	},
-	{
-		.type = IIO_INTENSITY,
-		.channel2 = IIO_MOD_LIGHT_IR,
-		.modified = 1,
-
-		.scan_index = 1,
-		.scan_type = {
-			.sign = 'u',
-			.shift = 8,
-			.realbits = 18,
-			.storagebits = 32,
-			.endianness = IIO_BE,
-		},
+		.type = IIO_TEMP,
+		.info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = -1,
 	},
 	},
+};
+
+static const struct iio_chan_spec max30105_channels[] = {
+	MAX30102_INTENSITY_CHANNEL(MAX30102_LED_RED, IIO_MOD_LIGHT_RED),
+	MAX30102_INTENSITY_CHANNEL(MAX30102_LED_IR, IIO_MOD_LIGHT_IR),
+	MAX30102_INTENSITY_CHANNEL(MAX30105_LED_GREEN, IIO_MOD_LIGHT_GREEN),
 	{
 	{
 		.type = IIO_TEMP,
 		.type = IIO_TEMP,
 		.info_mask_separate =
 		.info_mask_separate =
@@ -138,25 +176,69 @@ static const struct iio_chan_spec max30102_channels[] = {
 	},
 	},
 };
 };
 
 
-static int max30102_set_powermode(struct max30102_data *data, bool state)
+static int max30102_set_power(struct max30102_data *data, bool en)
 {
 {
 	return regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
 	return regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
 				  MAX30102_REG_MODE_CONFIG_PWR,
 				  MAX30102_REG_MODE_CONFIG_PWR,
-				  state ? 0 : MAX30102_REG_MODE_CONFIG_PWR);
+				  en ? 0 : MAX30102_REG_MODE_CONFIG_PWR);
 }
 }
 
 
+static int max30102_set_powermode(struct max30102_data *data, u8 mode, bool en)
+{
+	u8 reg = mode;
+
+	if (!en)
+		reg |= MAX30102_REG_MODE_CONFIG_PWR;
+
+	return regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
+				  MAX30102_REG_MODE_CONFIG_PWR |
+				  MAX30102_REG_MODE_CONFIG_MODE_MASK, reg);
+}
+
+#define MAX30102_MODE_CONTROL_LED_SLOTS(slot2, slot1) \
+	((slot2 << MAX30102_REG_MODE_CONTROL_SLOT_SHIFT) | slot1)
+
 static int max30102_buffer_postenable(struct iio_dev *indio_dev)
 static int max30102_buffer_postenable(struct iio_dev *indio_dev)
 {
 {
 	struct max30102_data *data = iio_priv(indio_dev);
 	struct max30102_data *data = iio_priv(indio_dev);
+	int ret;
+	u8 reg;
 
 
-	return max30102_set_powermode(data, true);
+	switch (*indio_dev->active_scan_mask) {
+	case BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR):
+		reg = MAX30102_REG_MODE_CONFIG_MODE_HR_SPO2;
+		break;
+	case BIT(MAX30102_LED_RED) | BIT(MAX30102_LED_IR) |
+	     BIT(MAX30105_LED_GREEN):
+		ret = regmap_update_bits(data->regmap,
+					 MAX30102_REG_MODE_CONTROL_SLOT21,
+					 MAX30102_REG_MODE_CONTROL_SLOT_MASK,
+					 MAX30102_MODE_CONTROL_LED_SLOTS(2, 1));
+		if (ret)
+			return ret;
+
+		ret = regmap_update_bits(data->regmap,
+					 MAX30102_REG_MODE_CONTROL_SLOT43,
+					 MAX30102_REG_MODE_CONTROL_SLOT_MASK,
+					 MAX30102_MODE_CONTROL_LED_SLOTS(0, 3));
+		if (ret)
+			return ret;
+
+		reg = MAX30102_REG_MODE_CONFIG_MODE_MULTI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return max30102_set_powermode(data, reg, true);
 }
 }
 
 
 static int max30102_buffer_predisable(struct iio_dev *indio_dev)
 static int max30102_buffer_predisable(struct iio_dev *indio_dev)
 {
 {
 	struct max30102_data *data = iio_priv(indio_dev);
 	struct max30102_data *data = iio_priv(indio_dev);
 
 
-	return max30102_set_powermode(data, false);
+	return max30102_set_powermode(data, MAX30102_REG_MODE_CONFIG_MODE_NONE,
+				      false);
 }
 }
 
 
 static const struct iio_buffer_setup_ops max30102_buffer_setup_ops = {
 static const struct iio_buffer_setup_ops max30102_buffer_setup_ops = {
@@ -180,32 +262,51 @@ static inline int max30102_fifo_count(struct max30102_data *data)
 	return 0;
 	return 0;
 }
 }
 
 
-static int max30102_read_measurement(struct max30102_data *data)
+#define MAX30102_COPY_DATA(i) \
+	memcpy(&data->processed_buffer[(i)], \
+	       &buffer[(i) * MAX30102_REG_FIFO_DATA_BYTES], \
+	       MAX30102_REG_FIFO_DATA_BYTES)
+
+static int max30102_read_measurement(struct max30102_data *data,
+				     unsigned int measurements)
 {
 {
 	int ret;
 	int ret;
 	u8 *buffer = (u8 *) &data->buffer;
 	u8 *buffer = (u8 *) &data->buffer;
 
 
 	ret = i2c_smbus_read_i2c_block_data(data->client,
 	ret = i2c_smbus_read_i2c_block_data(data->client,
 					    MAX30102_REG_FIFO_DATA,
 					    MAX30102_REG_FIFO_DATA,
-					    MAX30102_REG_FIFO_DATA_ENTRY_LEN,
+					    measurements *
+					    MAX30102_REG_FIFO_DATA_BYTES,
 					    buffer);
 					    buffer);
 
 
-	memcpy(&data->processed_buffer[0], &buffer[0], 3);
-	memcpy(&data->processed_buffer[1], &buffer[3], 3);
+	switch (measurements) {
+	case 3:
+		MAX30102_COPY_DATA(2);
+	case 2: /* fall-through */
+		MAX30102_COPY_DATA(1);
+	case 1: /* fall-through */
+		MAX30102_COPY_DATA(0);
+		break;
+	default:
+		return -EINVAL;
+	}
 
 
-	return (ret == MAX30102_REG_FIFO_DATA_ENTRY_LEN) ? 0 : -EINVAL;
+	return (ret == measurements * MAX30102_REG_FIFO_DATA_BYTES) ?
+	       0 : -EINVAL;
 }
 }
 
 
 static irqreturn_t max30102_interrupt_handler(int irq, void *private)
 static irqreturn_t max30102_interrupt_handler(int irq, void *private)
 {
 {
 	struct iio_dev *indio_dev = private;
 	struct iio_dev *indio_dev = private;
 	struct max30102_data *data = iio_priv(indio_dev);
 	struct max30102_data *data = iio_priv(indio_dev);
+	unsigned int measurements = bitmap_weight(indio_dev->active_scan_mask,
+						  indio_dev->masklength);
 	int ret, cnt = 0;
 	int ret, cnt = 0;
 
 
 	mutex_lock(&data->lock);
 	mutex_lock(&data->lock);
 
 
 	while (cnt || (cnt = max30102_fifo_count(data)) > 0) {
 	while (cnt || (cnt = max30102_fifo_count(data)) > 0) {
-		ret = max30102_read_measurement(data);
+		ret = max30102_read_measurement(data, measurements);
 		if (ret)
 		if (ret)
 			break;
 			break;
 
 
@@ -251,6 +352,29 @@ static int max30102_led_init(struct max30102_data *data)
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
+	if (data->chip_id == max30105) {
+		ret = of_property_read_u32(np,
+			"maxim,green-led-current-microamp", &val);
+		if (ret) {
+			dev_info(dev, "no green-led-current-microamp set\n");
+
+			/* Default to 7 mA green LED */
+			val = 7000;
+		}
+
+		ret = max30102_get_current_idx(val, &reg);
+		if (ret) {
+			dev_err(dev, "invalid green LED current setting %d\n",
+				val);
+			return ret;
+		}
+
+		ret = regmap_write(data->regmap, MAX30105_REG_GREEN_LED_CONFIG,
+				   reg);
+		if (ret)
+			return ret;
+	}
+
 	ret = of_property_read_u32(np, "maxim,ir-led-current-microamp", &val);
 	ret = of_property_read_u32(np, "maxim,ir-led-current-microamp", &val);
 	if (ret) {
 	if (ret) {
 		dev_info(dev, "no ir-led-current-microamp set\n");
 		dev_info(dev, "no ir-led-current-microamp set\n");
@@ -261,7 +385,7 @@ static int max30102_led_init(struct max30102_data *data)
 
 
 	ret = max30102_get_current_idx(val, &reg);
 	ret = max30102_get_current_idx(val, &reg);
 	if (ret) {
 	if (ret) {
-		dev_err(dev, "invalid IR LED current setting %d", val);
+		dev_err(dev, "invalid IR LED current setting %d\n", val);
 		return ret;
 		return ret;
 	}
 	}
 
 
@@ -277,7 +401,7 @@ static int max30102_chip_init(struct max30102_data *data)
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	/* enable 18-bit HR + SPO2 readings at 400Hz */
+	/* configure 18-bit HR + SpO2 readings at 400Hz */
 	ret = regmap_write(data->regmap, MAX30102_REG_SPO2_CONFIG,
 	ret = regmap_write(data->regmap, MAX30102_REG_SPO2_CONFIG,
 				(MAX30102_REG_SPO2_CONFIG_ADC_4096_STEPS
 				(MAX30102_REG_SPO2_CONFIG_ADC_4096_STEPS
 				 << MAX30102_REG_SPO2_CONFIG_ADC_MASK_SHIFT) |
 				 << MAX30102_REG_SPO2_CONFIG_ADC_MASK_SHIFT) |
@@ -287,14 +411,6 @@ static int max30102_chip_init(struct max30102_data *data)
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	/* enable SPO2 mode */
-	ret = regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
-				 MAX30102_REG_MODE_CONFIG_MODE_MASK,
-				 MAX30102_REG_MODE_CONFIG_MODE_HR_EN |
-				 MAX30102_REG_MODE_CONFIG_MODE_SPO2_EN);
-	if (ret)
-		return ret;
-
 	/* average 4 samples + generate FIFO interrupt */
 	/* average 4 samples + generate FIFO interrupt */
 	ret = regmap_write(data->regmap, MAX30102_REG_FIFO_CONFIG,
 	ret = regmap_write(data->regmap, MAX30102_REG_FIFO_CONFIG,
 				(MAX30102_REG_FIFO_CONFIG_AVG_4SAMPLES
 				(MAX30102_REG_FIFO_CONFIG_AVG_4SAMPLES
@@ -329,20 +445,31 @@ static int max30102_read_temp(struct max30102_data *data, int *val)
 	return 0;
 	return 0;
 }
 }
 
 
-static int max30102_get_temp(struct max30102_data *data, int *val)
+static int max30102_get_temp(struct max30102_data *data, int *val, bool en)
 {
 {
 	int ret;
 	int ret;
 
 
+	if (en) {
+		ret = max30102_set_power(data, true);
+		if (ret)
+			return ret;
+	}
+
 	/* start acquisition */
 	/* start acquisition */
 	ret = regmap_update_bits(data->regmap, MAX30102_REG_TEMP_CONFIG,
 	ret = regmap_update_bits(data->regmap, MAX30102_REG_TEMP_CONFIG,
 				 MAX30102_REG_TEMP_CONFIG_TEMP_EN,
 				 MAX30102_REG_TEMP_CONFIG_TEMP_EN,
 				 MAX30102_REG_TEMP_CONFIG_TEMP_EN);
 				 MAX30102_REG_TEMP_CONFIG_TEMP_EN);
 	if (ret)
 	if (ret)
-		return ret;
+		goto out;
 
 
 	msleep(35);
 	msleep(35);
+	ret = max30102_read_temp(data, val);
 
 
-	return max30102_read_temp(data, val);
+out:
+	if (en)
+		max30102_set_power(data, false);
+
+	return ret;
 }
 }
 
 
 static int max30102_read_raw(struct iio_dev *indio_dev,
 static int max30102_read_raw(struct iio_dev *indio_dev,
@@ -355,20 +482,19 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
 	switch (mask) {
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
 	case IIO_CHAN_INFO_RAW:
 		/*
 		/*
-		 * Temperature reading can only be acquired while engine
-		 * is running
+		 * Temperature reading can only be acquired when not in
+		 * shutdown; leave shutdown briefly when buffer not running
 		 */
 		 */
 		mutex_lock(&indio_dev->mlock);
 		mutex_lock(&indio_dev->mlock);
-
 		if (!iio_buffer_enabled(indio_dev))
 		if (!iio_buffer_enabled(indio_dev))
-			ret = -EBUSY;
-		else {
-			ret = max30102_get_temp(data, val);
-			if (!ret)
-				ret = IIO_VAL_INT;
-		}
-
+			ret = max30102_get_temp(data, val, true);
+		else
+			ret = max30102_get_temp(data, val, false);
 		mutex_unlock(&indio_dev->mlock);
 		mutex_unlock(&indio_dev->mlock);
+		if (ret)
+			return ret;
+
+		ret = IIO_VAL_INT;
 		break;
 		break;
 	case IIO_CHAN_INFO_SCALE:
 	case IIO_CHAN_INFO_SCALE:
 		*val = 1000;  /* 62.5 */
 		*val = 1000;  /* 62.5 */
@@ -391,6 +517,7 @@ static int max30102_probe(struct i2c_client *client,
 	struct iio_buffer *buffer;
 	struct iio_buffer *buffer;
 	struct iio_dev *indio_dev;
 	struct iio_dev *indio_dev;
 	int ret;
 	int ret;
+	unsigned int reg;
 
 
 	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
 	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
 	if (!indio_dev)
 	if (!indio_dev)
@@ -403,10 +530,7 @@ static int max30102_probe(struct i2c_client *client,
 	iio_device_attach_buffer(indio_dev, buffer);
 	iio_device_attach_buffer(indio_dev, buffer);
 
 
 	indio_dev->name = MAX30102_DRV_NAME;
 	indio_dev->name = MAX30102_DRV_NAME;
-	indio_dev->channels = max30102_channels;
 	indio_dev->info = &max30102_info;
 	indio_dev->info = &max30102_info;
-	indio_dev->num_channels = ARRAY_SIZE(max30102_channels);
-	indio_dev->available_scan_masks = max30102_scan_masks;
 	indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
 	indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
 	indio_dev->setup_ops = &max30102_buffer_setup_ops;
 	indio_dev->setup_ops = &max30102_buffer_setup_ops;
 	indio_dev->dev.parent = &client->dev;
 	indio_dev->dev.parent = &client->dev;
@@ -414,16 +538,50 @@ static int max30102_probe(struct i2c_client *client,
 	data = iio_priv(indio_dev);
 	data = iio_priv(indio_dev);
 	data->indio_dev = indio_dev;
 	data->indio_dev = indio_dev;
 	data->client = client;
 	data->client = client;
+	data->chip_id = id->driver_data;
 
 
 	mutex_init(&data->lock);
 	mutex_init(&data->lock);
 	i2c_set_clientdata(client, indio_dev);
 	i2c_set_clientdata(client, indio_dev);
 
 
+	switch (data->chip_id) {
+	case max30105:
+		indio_dev->channels = max30105_channels;
+		indio_dev->num_channels = ARRAY_SIZE(max30105_channels);
+		indio_dev->available_scan_masks = max30105_scan_masks;
+		break;
+	case max30102:
+		indio_dev->channels = max30102_channels;
+		indio_dev->num_channels = ARRAY_SIZE(max30102_channels);
+		indio_dev->available_scan_masks = max30102_scan_masks;
+		break;
+	default:
+		return -ENODEV;
+	}
+
 	data->regmap = devm_regmap_init_i2c(client, &max30102_regmap_config);
 	data->regmap = devm_regmap_init_i2c(client, &max30102_regmap_config);
 	if (IS_ERR(data->regmap)) {
 	if (IS_ERR(data->regmap)) {
-		dev_err(&client->dev, "regmap initialization failed.\n");
+		dev_err(&client->dev, "regmap initialization failed\n");
 		return PTR_ERR(data->regmap);
 		return PTR_ERR(data->regmap);
 	}
 	}
-	max30102_set_powermode(data, false);
+
+	/* check part ID */
+	ret = regmap_read(data->regmap, MAX30102_REG_PART_ID, &reg);
+	if (ret)
+		return ret;
+	if (reg != MAX30102_PART_NUMBER)
+		return -ENODEV;
+
+	/* show revision ID */
+	ret = regmap_read(data->regmap, MAX30102_REG_REV_ID, &reg);
+	if (ret)
+		return ret;
+	dev_dbg(&client->dev, "max3010x revision %02x\n", reg);
+
+	/* clear mode setting, chip shutdown */
+	ret = max30102_set_powermode(data, MAX30102_REG_MODE_CONFIG_MODE_NONE,
+				     false);
+	if (ret)
+		return ret;
 
 
 	ret = max30102_chip_init(data);
 	ret = max30102_chip_init(data);
 	if (ret)
 	if (ret)
@@ -452,19 +610,21 @@ static int max30102_remove(struct i2c_client *client)
 	struct max30102_data *data = iio_priv(indio_dev);
 	struct max30102_data *data = iio_priv(indio_dev);
 
 
 	iio_device_unregister(indio_dev);
 	iio_device_unregister(indio_dev);
-	max30102_set_powermode(data, false);
+	max30102_set_power(data, false);
 
 
 	return 0;
 	return 0;
 }
 }
 
 
 static const struct i2c_device_id max30102_id[] = {
 static const struct i2c_device_id max30102_id[] = {
-	{ "max30102", 0 },
+	{ "max30102", max30102 },
+	{ "max30105", max30105 },
 	{}
 	{}
 };
 };
 MODULE_DEVICE_TABLE(i2c, max30102_id);
 MODULE_DEVICE_TABLE(i2c, max30102_id);
 
 
 static const struct of_device_id max30102_dt_ids[] = {
 static const struct of_device_id max30102_dt_ids[] = {
 	{ .compatible = "maxim,max30102" },
 	{ .compatible = "maxim,max30102" },
+	{ .compatible = "maxim,max30105" },
 	{ }
 	{ }
 };
 };
 MODULE_DEVICE_TABLE(of, max30102_dt_ids);
 MODULE_DEVICE_TABLE(of, max30102_dt_ids);
@@ -481,5 +641,5 @@ static struct i2c_driver max30102_driver = {
 module_i2c_driver(max30102_driver);
 module_i2c_driver(max30102_driver);
 
 
 MODULE_AUTHOR("Matt Ranostay <matt@ranostay.consulting>");
 MODULE_AUTHOR("Matt Ranostay <matt@ranostay.consulting>");
-MODULE_DESCRIPTION("MAX30102 heart rate and pulse oximeter sensor");
+MODULE_DESCRIPTION("MAX30102 heart rate/pulse oximeter and MAX30105 particle sensor driver");
 MODULE_LICENSE("GPL");
 MODULE_LICENSE("GPL");

+ 2 - 1
drivers/iio/humidity/hts221.h

@@ -61,7 +61,8 @@ struct hts221_hw {
 extern const struct dev_pm_ops hts221_pm_ops;
 extern const struct dev_pm_ops hts221_pm_ops;
 
 
 int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val);
 int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val);
-int hts221_probe(struct iio_dev *iio_dev);
+int hts221_probe(struct device *dev, int irq, const char *name,
+		 const struct hts221_transfer_function *tf_ops);
 int hts221_set_enable(struct hts221_hw *hw, bool enable);
 int hts221_set_enable(struct hts221_hw *hw, bool enable);
 int hts221_allocate_buffers(struct hts221_hw *hw);
 int hts221_allocate_buffers(struct hts221_hw *hw);
 int hts221_allocate_trigger(struct hts221_hw *hw);
 int hts221_allocate_trigger(struct hts221_hw *hw);

+ 16 - 2
drivers/iio/humidity/hts221_core.c

@@ -581,12 +581,26 @@ static const struct iio_info hts221_info = {
 
 
 static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
 static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
 
 
-int hts221_probe(struct iio_dev *iio_dev)
+int hts221_probe(struct device *dev, int irq, const char *name,
+		 const struct hts221_transfer_function *tf_ops)
 {
 {
-	struct hts221_hw *hw = iio_priv(iio_dev);
+	struct iio_dev *iio_dev;
+	struct hts221_hw *hw;
 	int err;
 	int err;
 	u8 data;
 	u8 data;
 
 
+	iio_dev = devm_iio_device_alloc(dev, sizeof(*hw));
+	if (!iio_dev)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, (void *)iio_dev);
+
+	hw = iio_priv(iio_dev);
+	hw->name = name;
+	hw->dev = dev;
+	hw->irq = irq;
+	hw->tf = tf_ops;
+
 	mutex_init(&hw->lock);
 	mutex_init(&hw->lock);
 
 
 	err = hts221_check_whoami(hw);
 	err = hts221_check_whoami(hw);

+ 2 - 16
drivers/iio/humidity/hts221_i2c.c

@@ -66,22 +66,8 @@ static const struct hts221_transfer_function hts221_transfer_fn = {
 static int hts221_i2c_probe(struct i2c_client *client,
 static int hts221_i2c_probe(struct i2c_client *client,
 			    const struct i2c_device_id *id)
 			    const struct i2c_device_id *id)
 {
 {
-	struct hts221_hw *hw;
-	struct iio_dev *iio_dev;
-
-	iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*hw));
-	if (!iio_dev)
-		return -ENOMEM;
-
-	i2c_set_clientdata(client, iio_dev);
-
-	hw = iio_priv(iio_dev);
-	hw->name = client->name;
-	hw->dev = &client->dev;
-	hw->irq = client->irq;
-	hw->tf = &hts221_transfer_fn;
-
-	return hts221_probe(iio_dev);
+	return hts221_probe(&client->dev, client->irq,
+			    client->name, &hts221_transfer_fn);
 }
 }
 
 
 static const struct acpi_device_id hts221_acpi_match[] = {
 static const struct acpi_device_id hts221_acpi_match[] = {

+ 2 - 16
drivers/iio/humidity/hts221_spi.c

@@ -80,22 +80,8 @@ static const struct hts221_transfer_function hts221_transfer_fn = {
 
 
 static int hts221_spi_probe(struct spi_device *spi)
 static int hts221_spi_probe(struct spi_device *spi)
 {
 {
-	struct hts221_hw *hw;
-	struct iio_dev *iio_dev;
-
-	iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*hw));
-	if (!iio_dev)
-		return -ENOMEM;
-
-	spi_set_drvdata(spi, iio_dev);
-
-	hw = iio_priv(iio_dev);
-	hw->name = spi->modalias;
-	hw->dev = &spi->dev;
-	hw->irq = spi->irq;
-	hw->tf = &hts221_transfer_fn;
-
-	return hts221_probe(iio_dev);
+	return hts221_probe(&spi->dev, spi->irq,
+			    spi->modalias, &hts221_transfer_fn);
 }
 }
 
 
 static const struct of_device_id hts221_spi_of_match[] = {
 static const struct of_device_id hts221_spi_of_match[] = {

+ 16 - 12
drivers/iio/imu/adis16480.c

@@ -194,7 +194,7 @@ static int adis16480_show_serial_number(void *arg, u64 *val)
 
 
 	return 0;
 	return 0;
 }
 }
-DEFINE_SIMPLE_ATTRIBUTE(adis16480_serial_number_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16480_serial_number_fops,
 	adis16480_show_serial_number, NULL, "0x%.4llx\n");
 	adis16480_show_serial_number, NULL, "0x%.4llx\n");
 
 
 static int adis16480_show_product_id(void *arg, u64 *val)
 static int adis16480_show_product_id(void *arg, u64 *val)
@@ -212,7 +212,7 @@ static int adis16480_show_product_id(void *arg, u64 *val)
 
 
 	return 0;
 	return 0;
 }
 }
-DEFINE_SIMPLE_ATTRIBUTE(adis16480_product_id_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16480_product_id_fops,
 	adis16480_show_product_id, NULL, "%llu\n");
 	adis16480_show_product_id, NULL, "%llu\n");
 
 
 static int adis16480_show_flash_count(void *arg, u64 *val)
 static int adis16480_show_flash_count(void *arg, u64 *val)
@@ -230,24 +230,28 @@ static int adis16480_show_flash_count(void *arg, u64 *val)
 
 
 	return 0;
 	return 0;
 }
 }
-DEFINE_SIMPLE_ATTRIBUTE(adis16480_flash_count_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16480_flash_count_fops,
 	adis16480_show_flash_count, NULL, "%lld\n");
 	adis16480_show_flash_count, NULL, "%lld\n");
 
 
 static int adis16480_debugfs_init(struct iio_dev *indio_dev)
 static int adis16480_debugfs_init(struct iio_dev *indio_dev)
 {
 {
 	struct adis16480 *adis16480 = iio_priv(indio_dev);
 	struct adis16480 *adis16480 = iio_priv(indio_dev);
 
 
-	debugfs_create_file("firmware_revision", 0400,
+	debugfs_create_file_unsafe("firmware_revision", 0400,
 		indio_dev->debugfs_dentry, adis16480,
 		indio_dev->debugfs_dentry, adis16480,
 		&adis16480_firmware_revision_fops);
 		&adis16480_firmware_revision_fops);
-	debugfs_create_file("firmware_date", 0400, indio_dev->debugfs_dentry,
-		adis16480, &adis16480_firmware_date_fops);
-	debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
-		adis16480, &adis16480_serial_number_fops);
-	debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
-		adis16480, &adis16480_product_id_fops);
-	debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
-		adis16480, &adis16480_flash_count_fops);
+	debugfs_create_file_unsafe("firmware_date", 0400,
+		indio_dev->debugfs_dentry, adis16480,
+		&adis16480_firmware_date_fops);
+	debugfs_create_file_unsafe("serial_number", 0400,
+		indio_dev->debugfs_dentry, adis16480,
+		&adis16480_serial_number_fops);
+	debugfs_create_file_unsafe("product_id", 0400,
+		indio_dev->debugfs_dentry, adis16480,
+		&adis16480_product_id_fops);
+	debugfs_create_file_unsafe("flash_count", 0400,
+		indio_dev->debugfs_dentry, adis16480,
+		&adis16480_flash_count_fops);
 
 
 	return 0;
 	return 0;
 }
 }

+ 1 - 2
drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c

@@ -196,8 +196,7 @@ void inv_mpu_acpi_delete_mux_client(struct i2c_client *client)
 {
 {
 	struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
 	struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
 
 
-	if (st->mux_client)
-		i2c_unregister_device(st->mux_client);
+	i2c_unregister_device(st->mux_client);
 }
 }
 #else
 #else
 
 

+ 2 - 0
drivers/iio/imu/st_lsm6dsx/Kconfig

@@ -16,7 +16,9 @@ config IIO_ST_LSM6DSX
 config IIO_ST_LSM6DSX_I2C
 config IIO_ST_LSM6DSX_I2C
 	tristate
 	tristate
 	depends on IIO_ST_LSM6DSX
 	depends on IIO_ST_LSM6DSX
+	select REGMAP_I2C
 
 
 config IIO_ST_LSM6DSX_SPI
 config IIO_ST_LSM6DSX_SPI
 	tristate
 	tristate
 	depends on IIO_ST_LSM6DSX
 	depends on IIO_ST_LSM6DSX
+	select REGMAP_SPI

+ 12 - 27
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h

@@ -27,23 +27,12 @@ enum st_lsm6dsx_hw_id {
 	ST_LSM6DSX_MAX_ID,
 	ST_LSM6DSX_MAX_ID,
 };
 };
 
 
+#define ST_LSM6DSX_BUFF_SIZE		256
 #define ST_LSM6DSX_CHAN_SIZE		2
 #define ST_LSM6DSX_CHAN_SIZE		2
 #define ST_LSM6DSX_SAMPLE_SIZE		6
 #define ST_LSM6DSX_SAMPLE_SIZE		6
-
-#if defined(CONFIG_SPI_MASTER)
-#define ST_LSM6DSX_RX_MAX_LENGTH	256
-#define ST_LSM6DSX_TX_MAX_LENGTH	8
-
-struct st_lsm6dsx_transfer_buffer {
-	u8 rx_buf[ST_LSM6DSX_RX_MAX_LENGTH];
-	u8 tx_buf[ST_LSM6DSX_TX_MAX_LENGTH] ____cacheline_aligned;
-};
-#endif /* CONFIG_SPI_MASTER */
-
-struct st_lsm6dsx_transfer_function {
-	int (*read)(struct device *dev, u8 addr, int len, u8 *data);
-	int (*write)(struct device *dev, u8 addr, int len, u8 *data);
-};
+#define ST_LSM6DSX_MAX_WORD_LEN		((32 / ST_LSM6DSX_SAMPLE_SIZE) * \
+					 ST_LSM6DSX_SAMPLE_SIZE)
+#define ST_LSM6DSX_SHIFT_VAL(val, mask)	(((val) << __ffs(mask)) & (mask))
 
 
 struct st_lsm6dsx_reg {
 struct st_lsm6dsx_reg {
 	u8 addr;
 	u8 addr;
@@ -127,47 +116,43 @@ struct st_lsm6dsx_sensor {
 /**
 /**
  * struct st_lsm6dsx_hw - ST IMU MEMS hw instance
  * struct st_lsm6dsx_hw - ST IMU MEMS hw instance
  * @dev: Pointer to instance of struct device (I2C or SPI).
  * @dev: Pointer to instance of struct device (I2C or SPI).
+ * @regmap: Register map of the device.
  * @irq: Device interrupt line (I2C or SPI).
  * @irq: Device interrupt line (I2C or SPI).
- * @lock: Mutex to protect read and write operations.
  * @fifo_lock: Mutex to prevent concurrent access to the hw FIFO.
  * @fifo_lock: Mutex to prevent concurrent access to the hw FIFO.
+ * @conf_lock: Mutex to prevent concurrent FIFO configuration update.
  * @fifo_mode: FIFO operating mode supported by the device.
  * @fifo_mode: FIFO operating mode supported by the device.
  * @enable_mask: Enabled sensor bitmask.
  * @enable_mask: Enabled sensor bitmask.
  * @sip: Total number of samples (acc/gyro) in a given pattern.
  * @sip: Total number of samples (acc/gyro) in a given pattern.
+ * @buff: Device read buffer.
  * @iio_devs: Pointers to acc/gyro iio_dev instances.
  * @iio_devs: Pointers to acc/gyro iio_dev instances.
  * @settings: Pointer to the specific sensor settings in use.
  * @settings: Pointer to the specific sensor settings in use.
- * @tf: Transfer function structure used by I/O operations.
- * @tb: Transfer buffers used by SPI I/O operations.
  */
  */
 struct st_lsm6dsx_hw {
 struct st_lsm6dsx_hw {
 	struct device *dev;
 	struct device *dev;
+	struct regmap *regmap;
 	int irq;
 	int irq;
 
 
-	struct mutex lock;
 	struct mutex fifo_lock;
 	struct mutex fifo_lock;
+	struct mutex conf_lock;
 
 
 	enum st_lsm6dsx_fifo_mode fifo_mode;
 	enum st_lsm6dsx_fifo_mode fifo_mode;
 	u8 enable_mask;
 	u8 enable_mask;
 	u8 sip;
 	u8 sip;
 
 
+	u8 *buff;
+
 	struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
 	struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
 
 
 	const struct st_lsm6dsx_settings *settings;
 	const struct st_lsm6dsx_settings *settings;
-
-	const struct st_lsm6dsx_transfer_function *tf;
-#if defined(CONFIG_SPI_MASTER)
-	struct st_lsm6dsx_transfer_buffer tb;
-#endif /* CONFIG_SPI_MASTER */
 };
 };
 
 
 extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
 extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
 
 
 int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
 int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
-		     const struct st_lsm6dsx_transfer_function *tf_ops);
+		     struct regmap *regmap);
 int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor);
 int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor);
 int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor);
 int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor);
 int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw);
 int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw);
-int st_lsm6dsx_write_with_mask(struct st_lsm6dsx_hw *hw, u8 addr, u8 mask,
-			       u8 val);
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor,
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor,
 				u16 watermark);
 				u16 watermark);
 int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw);
 int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw);

+ 69 - 38
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c

@@ -30,6 +30,8 @@
 #include <linux/iio/kfifo_buf.h>
 #include <linux/iio/kfifo_buf.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
 #include <linux/iio/buffer.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
 
 
 #include <linux/platform_data/st_sensors_pdata.h>
 #include <linux/platform_data/st_sensors_pdata.h>
 
 
@@ -120,8 +122,10 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
 
 
 		dec_reg = &hw->settings->decimator[sensor->id];
 		dec_reg = &hw->settings->decimator[sensor->id];
 		if (dec_reg->addr) {
 		if (dec_reg->addr) {
-			err = st_lsm6dsx_write_with_mask(hw, dec_reg->addr,
-							 dec_reg->mask, data);
+			int val = ST_LSM6DSX_SHIFT_VAL(data, dec_reg->mask);
+
+			err = regmap_update_bits(hw->regmap, dec_reg->addr,
+						 dec_reg->mask, val);
 			if (err < 0)
 			if (err < 0)
 				return err;
 				return err;
 		}
 		}
@@ -137,8 +141,10 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
 {
 {
 	int err;
 	int err;
 
 
-	err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
-					 ST_LSM6DSX_FIFO_MODE_MASK, fifo_mode);
+	err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+				 ST_LSM6DSX_FIFO_MODE_MASK,
+				 FIELD_PREP(ST_LSM6DSX_FIFO_MODE_MASK,
+					    fifo_mode));
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
@@ -154,8 +160,9 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
 	u8 data;
 	u8 data;
 
 
 	data = hw->enable_mask ? ST_LSM6DSX_MAX_FIFO_ODR_VAL : 0;
 	data = hw->enable_mask ? ST_LSM6DSX_MAX_FIFO_ODR_VAL : 0;
-	return st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
-					  ST_LSM6DSX_FIFO_ODR_MASK, data);
+	return regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+				 ST_LSM6DSX_FIFO_ODR_MASK,
+				 FIELD_PREP(ST_LSM6DSX_FIFO_ODR_MASK, data));
 }
 }
 
 
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
@@ -163,9 +170,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 	u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
 	u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
 	struct st_lsm6dsx_hw *hw = sensor->hw;
 	struct st_lsm6dsx_hw *hw = sensor->hw;
 	struct st_lsm6dsx_sensor *cur_sensor;
 	struct st_lsm6dsx_sensor *cur_sensor;
+	int i, err, data;
 	__le16 wdata;
 	__le16 wdata;
-	int i, err;
-	u8 data;
 
 
 	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
 	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
 		cur_sensor = iio_priv(hw->iio_devs[i]);
 		cur_sensor = iio_priv(hw->iio_devs[i]);
@@ -187,24 +193,42 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 	fifo_watermark = (fifo_watermark / sip) * sip;
 	fifo_watermark = (fifo_watermark / sip) * sip;
 	fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
 	fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
 
 
-	mutex_lock(&hw->lock);
-
-	err = hw->tf->read(hw->dev, hw->settings->fifo_ops.fifo_th.addr + 1,
-			   sizeof(data), &data);
+	err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
+			  &data);
 	if (err < 0)
 	if (err < 0)
-		goto out;
+		return err;
 
 
 	fifo_th_mask = hw->settings->fifo_ops.fifo_th.mask;
 	fifo_th_mask = hw->settings->fifo_ops.fifo_th.mask;
 	fifo_watermark = ((data << 8) & ~fifo_th_mask) |
 	fifo_watermark = ((data << 8) & ~fifo_th_mask) |
 			 (fifo_watermark & fifo_th_mask);
 			 (fifo_watermark & fifo_th_mask);
 
 
 	wdata = cpu_to_le16(fifo_watermark);
 	wdata = cpu_to_le16(fifo_watermark);
-	err = hw->tf->write(hw->dev, hw->settings->fifo_ops.fifo_th.addr,
-			    sizeof(wdata), (u8 *)&wdata);
-out:
-	mutex_unlock(&hw->lock);
+	return regmap_bulk_write(hw->regmap,
+				 hw->settings->fifo_ops.fifo_th.addr,
+				 &wdata, sizeof(wdata));
+}
 
 
-	return err < 0 ? err : 0;
+/*
+ * Set max bulk read to ST_LSM6DSX_MAX_WORD_LEN in order to avoid
+ * a kmalloc for each bus access
+ */
+static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 *data,
+					unsigned int data_len)
+{
+	unsigned int word_len, read_len = 0;
+	int err;
+
+	while (read_len < data_len) {
+		word_len = min_t(unsigned int, data_len - read_len,
+				 ST_LSM6DSX_MAX_WORD_LEN);
+		err = regmap_bulk_read(hw->regmap,
+				       ST_LSM6DSX_REG_FIFO_OUTL_ADDR,
+				       data + read_len, word_len);
+		if (err < 0)
+			return err;
+		read_len += word_len;
+	}
+	return 0;
 }
 }
 
 
 /**
 /**
@@ -223,11 +247,11 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 	struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
 	struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
 	s64 acc_ts, acc_delta_ts, gyro_ts, gyro_delta_ts;
 	s64 acc_ts, acc_delta_ts, gyro_ts, gyro_delta_ts;
 	u8 iio_buff[ALIGN(ST_LSM6DSX_SAMPLE_SIZE, sizeof(s64)) + sizeof(s64)];
 	u8 iio_buff[ALIGN(ST_LSM6DSX_SAMPLE_SIZE, sizeof(s64)) + sizeof(s64)];
-	u8 buff[pattern_len];
 	__le16 fifo_status;
 	__le16 fifo_status;
 
 
-	err = hw->tf->read(hw->dev, hw->settings->fifo_ops.fifo_diff.addr,
-			   sizeof(fifo_status), (u8 *)&fifo_status);
+	err = regmap_bulk_read(hw->regmap,
+			       hw->settings->fifo_ops.fifo_diff.addr,
+			       &fifo_status, sizeof(fifo_status));
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
@@ -255,8 +279,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 				samples);
 				samples);
 
 
 	for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
 	for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
-		err = hw->tf->read(hw->dev, ST_LSM6DSX_REG_FIFO_OUTL_ADDR,
-				   sizeof(buff), buff);
+		err = st_lsm6dsx_read_block(hw, hw->buff, pattern_len);
 		if (err < 0)
 		if (err < 0)
 			return err;
 			return err;
 
 
@@ -281,7 +304,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 
 
 		while (acc_sip > 0 || gyro_sip > 0) {
 		while (acc_sip > 0 || gyro_sip > 0) {
 			if (gyro_sip-- > 0) {
 			if (gyro_sip-- > 0) {
-				memcpy(iio_buff, &buff[offset],
+				memcpy(iio_buff, &hw->buff[offset],
 				       ST_LSM6DSX_SAMPLE_SIZE);
 				       ST_LSM6DSX_SAMPLE_SIZE);
 				iio_push_to_buffers_with_timestamp(
 				iio_push_to_buffers_with_timestamp(
 					hw->iio_devs[ST_LSM6DSX_ID_GYRO],
 					hw->iio_devs[ST_LSM6DSX_ID_GYRO],
@@ -291,7 +314,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 			}
 			}
 
 
 			if (acc_sip-- > 0) {
 			if (acc_sip-- > 0) {
-				memcpy(iio_buff, &buff[offset],
+				memcpy(iio_buff, &hw->buff[offset],
 				       ST_LSM6DSX_SAMPLE_SIZE);
 				       ST_LSM6DSX_SAMPLE_SIZE);
 				iio_push_to_buffers_with_timestamp(
 				iio_push_to_buffers_with_timestamp(
 					hw->iio_devs[ST_LSM6DSX_ID_ACC],
 					hw->iio_devs[ST_LSM6DSX_ID_ACC],
@@ -325,38 +348,40 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
 	struct st_lsm6dsx_hw *hw = sensor->hw;
 	struct st_lsm6dsx_hw *hw = sensor->hw;
 	int err;
 	int err;
 
 
+	mutex_lock(&hw->conf_lock);
+
 	if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS) {
 	if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS) {
 		err = st_lsm6dsx_flush_fifo(hw);
 		err = st_lsm6dsx_flush_fifo(hw);
 		if (err < 0)
 		if (err < 0)
-			return err;
+			goto out;
 	}
 	}
 
 
 	if (enable) {
 	if (enable) {
 		err = st_lsm6dsx_sensor_enable(sensor);
 		err = st_lsm6dsx_sensor_enable(sensor);
 		if (err < 0)
 		if (err < 0)
-			return err;
+			goto out;
 	} else {
 	} else {
 		err = st_lsm6dsx_sensor_disable(sensor);
 		err = st_lsm6dsx_sensor_disable(sensor);
 		if (err < 0)
 		if (err < 0)
-			return err;
+			goto out;
 	}
 	}
 
 
 	err = st_lsm6dsx_set_fifo_odr(sensor, enable);
 	err = st_lsm6dsx_set_fifo_odr(sensor, enable);
 	if (err < 0)
 	if (err < 0)
-		return err;
+		goto out;
 
 
 	err = st_lsm6dsx_update_decimators(hw);
 	err = st_lsm6dsx_update_decimators(hw);
 	if (err < 0)
 	if (err < 0)
-		return err;
+		goto out;
 
 
 	err = st_lsm6dsx_update_watermark(sensor, sensor->watermark);
 	err = st_lsm6dsx_update_watermark(sensor, sensor->watermark);
 	if (err < 0)
 	if (err < 0)
-		return err;
+		goto out;
 
 
 	if (hw->enable_mask) {
 	if (hw->enable_mask) {
 		err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
 		err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
 		if (err < 0)
 		if (err < 0)
-			return err;
+			goto out;
 
 
 		/*
 		/*
 		 * store enable buffer timestamp as reference to compute
 		 * store enable buffer timestamp as reference to compute
@@ -365,7 +390,10 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
 		sensor->ts = iio_get_time_ns(iio_dev);
 		sensor->ts = iio_get_time_ns(iio_dev);
 	}
 	}
 
 
-	return 0;
+out:
+	mutex_unlock(&hw->conf_lock);
+
+	return err;
 }
 }
 
 
 static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
 static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
@@ -444,17 +472,20 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_HLACTIVE_ADDR,
-					 ST_LSM6DSX_REG_HLACTIVE_MASK,
-					 irq_active_low);
+	err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_HLACTIVE_ADDR,
+				 ST_LSM6DSX_REG_HLACTIVE_MASK,
+				 FIELD_PREP(ST_LSM6DSX_REG_HLACTIVE_MASK,
+					    irq_active_low));
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
 	pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
 	pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
 	if ((np && of_property_read_bool(np, "drive-open-drain")) ||
 	if ((np && of_property_read_bool(np, "drive-open-drain")) ||
 	    (pdata && pdata->open_drain)) {
 	    (pdata && pdata->open_drain)) {
-		err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_PP_OD_ADDR,
-						 ST_LSM6DSX_REG_PP_OD_MASK, 1);
+		err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_PP_OD_ADDR,
+					 ST_LSM6DSX_REG_PP_OD_MASK,
+					 FIELD_PREP(ST_LSM6DSX_REG_PP_OD_MASK,
+						    1));
 		if (err < 0)
 		if (err < 0)
 			return err;
 			return err;
 
 

+ 51 - 64
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c

@@ -37,6 +37,8 @@
 #include <linux/iio/iio.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
 #include <linux/iio/sysfs.h>
 #include <linux/pm.h>
 #include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
 
 
 #include <linux/platform_data/st_sensors_pdata.h>
 #include <linux/platform_data/st_sensors_pdata.h>
 
 
@@ -277,36 +279,9 @@ static const struct iio_chan_spec st_lsm6dsx_gyro_channels[] = {
 	IIO_CHAN_SOFT_TIMESTAMP(3),
 	IIO_CHAN_SOFT_TIMESTAMP(3),
 };
 };
 
 
-int st_lsm6dsx_write_with_mask(struct st_lsm6dsx_hw *hw, u8 addr, u8 mask,
-			       u8 val)
-{
-	u8 data;
-	int err;
-
-	mutex_lock(&hw->lock);
-
-	err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
-	if (err < 0) {
-		dev_err(hw->dev, "failed to read %02x register\n", addr);
-		goto out;
-	}
-
-	data = (data & ~mask) | ((val << __ffs(mask)) & mask);
-
-	err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
-	if (err < 0)
-		dev_err(hw->dev, "failed to write %02x register\n", addr);
-
-out:
-	mutex_unlock(&hw->lock);
-
-	return err;
-}
-
 static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id)
 static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id)
 {
 {
-	int err, i, j;
-	u8 data;
+	int err, i, j, data;
 
 
 	for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
 	for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
 		for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
 		for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
@@ -322,8 +297,7 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id)
 		return -ENODEV;
 		return -ENODEV;
 	}
 	}
 
 
-	err = hw->tf->read(hw->dev, ST_LSM6DSX_REG_WHOAMI_ADDR, sizeof(data),
-			   &data);
+	err = regmap_read(hw->regmap, ST_LSM6DSX_REG_WHOAMI_ADDR, &data);
 	if (err < 0) {
 	if (err < 0) {
 		dev_err(hw->dev, "failed to read whoami register\n");
 		dev_err(hw->dev, "failed to read whoami register\n");
 		return err;
 		return err;
@@ -342,22 +316,22 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id)
 static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
 static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
 				     u32 gain)
 				     u32 gain)
 {
 {
-	enum st_lsm6dsx_sensor_id id = sensor->id;
+	struct st_lsm6dsx_hw *hw = sensor->hw;
+	const struct st_lsm6dsx_reg *reg;
 	int i, err;
 	int i, err;
 	u8 val;
 	u8 val;
 
 
 	for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
 	for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
-		if (st_lsm6dsx_fs_table[id].fs_avl[i].gain == gain)
+		if (st_lsm6dsx_fs_table[sensor->id].fs_avl[i].gain == gain)
 			break;
 			break;
 
 
 	if (i == ST_LSM6DSX_FS_LIST_SIZE)
 	if (i == ST_LSM6DSX_FS_LIST_SIZE)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	val = st_lsm6dsx_fs_table[id].fs_avl[i].val;
-	err = st_lsm6dsx_write_with_mask(sensor->hw,
-					 st_lsm6dsx_fs_table[id].reg.addr,
-					 st_lsm6dsx_fs_table[id].reg.mask,
-					 val);
+	val = st_lsm6dsx_fs_table[sensor->id].fs_avl[i].val;
+	reg = &st_lsm6dsx_fs_table[sensor->id].reg;
+	err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+				 ST_LSM6DSX_SHIFT_VAL(val, reg->mask));
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
@@ -385,7 +359,8 @@ static int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr,
 
 
 static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
 static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
 {
 {
-	enum st_lsm6dsx_sensor_id id = sensor->id;
+	struct st_lsm6dsx_hw *hw = sensor->hw;
+	const struct st_lsm6dsx_reg *reg;
 	int err;
 	int err;
 	u8 val;
 	u8 val;
 
 
@@ -393,10 +368,9 @@ static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
-	return st_lsm6dsx_write_with_mask(sensor->hw,
-					  st_lsm6dsx_odr_table[id].reg.addr,
-					  st_lsm6dsx_odr_table[id].reg.mask,
-					  val);
+	reg = &st_lsm6dsx_odr_table[sensor->id].reg;
+	return regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+				  ST_LSM6DSX_SHIFT_VAL(val, reg->mask));
 }
 }
 
 
 int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor)
 int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor)
@@ -414,16 +388,17 @@ int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor)
 
 
 int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor)
 int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor)
 {
 {
-	enum st_lsm6dsx_sensor_id id = sensor->id;
+	struct st_lsm6dsx_hw *hw = sensor->hw;
+	const struct st_lsm6dsx_reg *reg;
 	int err;
 	int err;
 
 
-	err = st_lsm6dsx_write_with_mask(sensor->hw,
-					 st_lsm6dsx_odr_table[id].reg.addr,
-					 st_lsm6dsx_odr_table[id].reg.mask, 0);
+	reg = &st_lsm6dsx_odr_table[sensor->id].reg;
+	err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+				 ST_LSM6DSX_SHIFT_VAL(0, reg->mask));
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
-	sensor->hw->enable_mask &= ~BIT(id);
+	sensor->hw->enable_mask &= ~BIT(sensor->id);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -431,6 +406,7 @@ int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor)
 static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
 static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
 				   u8 addr, int *val)
 				   u8 addr, int *val)
 {
 {
+	struct st_lsm6dsx_hw *hw = sensor->hw;
 	int err, delay;
 	int err, delay;
 	__le16 data;
 	__le16 data;
 
 
@@ -441,14 +417,13 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
 	delay = 1000000 / sensor->odr;
 	delay = 1000000 / sensor->odr;
 	usleep_range(delay, 2 * delay);
 	usleep_range(delay, 2 * delay);
 
 
-	err = sensor->hw->tf->read(sensor->hw->dev, addr, sizeof(data),
-				   (u8 *)&data);
+	err = regmap_bulk_read(hw->regmap, addr, &data, sizeof(data));
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
 	st_lsm6dsx_sensor_disable(sensor);
 	st_lsm6dsx_sensor_disable(sensor);
 
 
-	*val = (s16)data;
+	*val = (s16)le16_to_cpu(data);
 
 
 	return IIO_VAL_INT;
 	return IIO_VAL_INT;
 }
 }
@@ -528,7 +503,12 @@ static int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
 	if (val < 1 || val > hw->settings->max_fifo_size)
 	if (val < 1 || val > hw->settings->max_fifo_size)
 		return -EINVAL;
 		return -EINVAL;
 
 
+	mutex_lock(&hw->conf_lock);
+
 	err = st_lsm6dsx_update_watermark(sensor, val);
 	err = st_lsm6dsx_update_watermark(sensor, val);
+
+	mutex_unlock(&hw->conf_lock);
+
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
@@ -652,20 +632,20 @@ static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
 
 
 static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
 static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
 {
 {
-	u8 data, drdy_int_reg;
+	u8 drdy_int_reg;
 	int err;
 	int err;
 
 
-	data = ST_LSM6DSX_REG_RESET_MASK;
-	err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_RESET_ADDR, sizeof(data),
-			    &data);
+	err = regmap_write(hw->regmap, ST_LSM6DSX_REG_RESET_ADDR,
+			   ST_LSM6DSX_REG_RESET_MASK);
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
 	msleep(200);
 	msleep(200);
 
 
 	/* enable Block Data Update */
 	/* enable Block Data Update */
-	err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_BDU_ADDR,
-					 ST_LSM6DSX_REG_BDU_MASK, 1);
+	err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_BDU_ADDR,
+				 ST_LSM6DSX_REG_BDU_MASK,
+				 FIELD_PREP(ST_LSM6DSX_REG_BDU_MASK, 1));
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
@@ -674,8 +654,10 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
 	if (err < 0)
 	if (err < 0)
 		return err;
 		return err;
 
 
-	return st_lsm6dsx_write_with_mask(hw, drdy_int_reg,
-					  ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK, 1);
+	return regmap_update_bits(hw->regmap, drdy_int_reg,
+				  ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
+				  FIELD_PREP(ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
+					     1));
 }
 }
 
 
 static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
 static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
@@ -726,7 +708,7 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
 }
 }
 
 
 int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
 int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
-		     const struct st_lsm6dsx_transfer_function *tf_ops)
+		     struct regmap *regmap)
 {
 {
 	struct st_lsm6dsx_hw *hw;
 	struct st_lsm6dsx_hw *hw;
 	int i, err;
 	int i, err;
@@ -737,12 +719,16 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
 
 
 	dev_set_drvdata(dev, (void *)hw);
 	dev_set_drvdata(dev, (void *)hw);
 
 
-	mutex_init(&hw->lock);
 	mutex_init(&hw->fifo_lock);
 	mutex_init(&hw->fifo_lock);
+	mutex_init(&hw->conf_lock);
+
+	hw->buff = devm_kzalloc(dev, ST_LSM6DSX_BUFF_SIZE, GFP_KERNEL);
+	if (!hw->buff)
+		return -ENOMEM;
 
 
 	hw->dev = dev;
 	hw->dev = dev;
 	hw->irq = irq;
 	hw->irq = irq;
-	hw->tf = tf_ops;
+	hw->regmap = regmap;
 
 
 	err = st_lsm6dsx_check_whoami(hw, hw_id);
 	err = st_lsm6dsx_check_whoami(hw, hw_id);
 	if (err < 0)
 	if (err < 0)
@@ -778,6 +764,7 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
 {
 {
 	struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev);
 	struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev);
 	struct st_lsm6dsx_sensor *sensor;
 	struct st_lsm6dsx_sensor *sensor;
+	const struct st_lsm6dsx_reg *reg;
 	int i, err = 0;
 	int i, err = 0;
 
 
 	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
 	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
@@ -785,9 +772,9 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
 		if (!(hw->enable_mask & BIT(sensor->id)))
 		if (!(hw->enable_mask & BIT(sensor->id)))
 			continue;
 			continue;
 
 
-		err = st_lsm6dsx_write_with_mask(hw,
-				st_lsm6dsx_odr_table[sensor->id].reg.addr,
-				st_lsm6dsx_odr_table[sensor->id].reg.mask, 0);
+		reg = &st_lsm6dsx_odr_table[sensor->id].reg;
+		err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+					 ST_LSM6DSX_SHIFT_VAL(0, reg->mask));
 		if (err < 0)
 		if (err < 0)
 			return err;
 			return err;
 	}
 	}

+ 15 - 40
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c

@@ -14,55 +14,30 @@
 #include <linux/i2c.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of.h>
+#include <linux/regmap.h>
 
 
 #include "st_lsm6dsx.h"
 #include "st_lsm6dsx.h"
 
 
-static int st_lsm6dsx_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-	struct i2c_msg msg[2];
-
-	msg[0].addr = client->addr;
-	msg[0].flags = client->flags;
-	msg[0].len = 1;
-	msg[0].buf = &addr;
-
-	msg[1].addr = client->addr;
-	msg[1].flags = client->flags | I2C_M_RD;
-	msg[1].len = len;
-	msg[1].buf = data;
-
-	return i2c_transfer(client->adapter, msg, 2);
-}
-
-static int st_lsm6dsx_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-	struct i2c_msg msg;
-	u8 send[len + 1];
-
-	send[0] = addr;
-	memcpy(&send[1], data, len * sizeof(u8));
-
-	msg.addr = client->addr;
-	msg.flags = client->flags;
-	msg.len = len + 1;
-	msg.buf = send;
-
-	return i2c_transfer(client->adapter, &msg, 1);
-}
-
-static const struct st_lsm6dsx_transfer_function st_lsm6dsx_transfer_fn = {
-	.read = st_lsm6dsx_i2c_read,
-	.write = st_lsm6dsx_i2c_write,
+static const struct regmap_config st_lsm6dsx_i2c_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
 };
 };
 
 
 static int st_lsm6dsx_i2c_probe(struct i2c_client *client,
 static int st_lsm6dsx_i2c_probe(struct i2c_client *client,
 				const struct i2c_device_id *id)
 				const struct i2c_device_id *id)
 {
 {
+	int hw_id = id->driver_data;
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_i2c(client, &st_lsm6dsx_i2c_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
 	return st_lsm6dsx_probe(&client->dev, client->irq,
 	return st_lsm6dsx_probe(&client->dev, client->irq,
-				(int)id->driver_data, id->name,
-				&st_lsm6dsx_transfer_fn);
+				hw_id, id->name, regmap);
 }
 }
 
 
 static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
 static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {

+ 14 - 56
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c

@@ -14,72 +14,30 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/spi.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of.h>
+#include <linux/regmap.h>
 
 
 #include "st_lsm6dsx.h"
 #include "st_lsm6dsx.h"
 
 
-#define SENSORS_SPI_READ	BIT(7)
-
-static int st_lsm6dsx_spi_read(struct device *dev, u8 addr, int len,
-			       u8 *data)
-{
-	struct spi_device *spi = to_spi_device(dev);
-	struct st_lsm6dsx_hw *hw = spi_get_drvdata(spi);
-	int err;
-
-	struct spi_transfer xfers[] = {
-		{
-			.tx_buf = hw->tb.tx_buf,
-			.bits_per_word = 8,
-			.len = 1,
-		},
-		{
-			.rx_buf = hw->tb.rx_buf,
-			.bits_per_word = 8,
-			.len = len,
-		}
-	};
-
-	hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
-
-	err = spi_sync_transfer(spi, xfers,  ARRAY_SIZE(xfers));
-	if (err < 0)
-		return err;
-
-	memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
-
-	return len;
-}
-
-static int st_lsm6dsx_spi_write(struct device *dev, u8 addr, int len,
-				u8 *data)
-{
-	struct st_lsm6dsx_hw *hw;
-	struct spi_device *spi;
-
-	if (len >= ST_LSM6DSX_TX_MAX_LENGTH)
-		return -ENOMEM;
-
-	spi = to_spi_device(dev);
-	hw = spi_get_drvdata(spi);
-
-	hw->tb.tx_buf[0] = addr;
-	memcpy(&hw->tb.tx_buf[1], data, len);
-
-	return spi_write(spi, hw->tb.tx_buf, len + 1);
-}
-
-static const struct st_lsm6dsx_transfer_function st_lsm6dsx_transfer_fn = {
-	.read = st_lsm6dsx_spi_read,
-	.write = st_lsm6dsx_spi_write,
+static const struct regmap_config st_lsm6dsx_spi_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
 };
 };
 
 
 static int st_lsm6dsx_spi_probe(struct spi_device *spi)
 static int st_lsm6dsx_spi_probe(struct spi_device *spi)
 {
 {
 	const struct spi_device_id *id = spi_get_device_id(spi);
 	const struct spi_device_id *id = spi_get_device_id(spi);
+	int hw_id = id->driver_data;
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_spi(spi, &st_lsm6dsx_spi_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
 
 
 	return st_lsm6dsx_probe(&spi->dev, spi->irq,
 	return st_lsm6dsx_probe(&spi->dev, spi->irq,
-				(int)id->driver_data, id->name,
-				&st_lsm6dsx_transfer_fn);
+				hw_id, id->name, regmap);
 }
 }
 
 
 static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
 static const struct of_device_id st_lsm6dsx_spi_of_match[] = {

+ 15 - 0
drivers/iio/industrialio-buffer.c

@@ -1198,6 +1198,18 @@ out:
 	return ret ? ret : len;
 	return ret ? ret : len;
 }
 }
 
 
+static ssize_t iio_dma_show_data_available(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	size_t bytes;
+
+	bytes = iio_buffer_data_available(indio_dev->buffer);
+
+	return sprintf(buf, "%zu\n", bytes);
+}
+
 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
 static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
 		   iio_buffer_write_length);
 		   iio_buffer_write_length);
 static struct device_attribute dev_attr_length_ro = __ATTR(length,
 static struct device_attribute dev_attr_length_ro = __ATTR(length,
@@ -1208,11 +1220,14 @@ static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
 		   iio_buffer_show_watermark, iio_buffer_store_watermark);
 		   iio_buffer_show_watermark, iio_buffer_store_watermark);
 static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
 static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
 	S_IRUGO, iio_buffer_show_watermark, NULL);
 	S_IRUGO, iio_buffer_show_watermark, NULL);
+static DEVICE_ATTR(data_available, S_IRUGO,
+		iio_dma_show_data_available, NULL);
 
 
 static struct attribute *iio_buffer_attrs[] = {
 static struct attribute *iio_buffer_attrs[] = {
 	&dev_attr_length.attr,
 	&dev_attr_length.attr,
 	&dev_attr_enable.attr,
 	&dev_attr_enable.attr,
 	&dev_attr_watermark.attr,
 	&dev_attr_watermark.attr,
+	&dev_attr_data_available.attr,
 };
 };
 
 
 int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
 int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)

+ 1 - 0
drivers/iio/industrialio-core.c

@@ -588,6 +588,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
 		return snprintf(buf, len, "%d", vals[0]);
 		return snprintf(buf, len, "%d", vals[0]);
 	case IIO_VAL_INT_PLUS_MICRO_DB:
 	case IIO_VAL_INT_PLUS_MICRO_DB:
 		scale_db = true;
 		scale_db = true;
+		/* fall through */
 	case IIO_VAL_INT_PLUS_MICRO:
 	case IIO_VAL_INT_PLUS_MICRO:
 		if (vals[1] < 0)
 		if (vals[1] < 0)
 			return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
 			return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),

+ 34 - 0
drivers/iio/light/Kconfig

@@ -334,6 +334,30 @@ config STK3310
 	 Choosing M will build the driver as a module. If so, the module
 	 Choosing M will build the driver as a module. If so, the module
 	 will be called stk3310.
 	 will be called stk3310.
 
 
+config ST_UVIS25
+	tristate "STMicroelectronics UVIS25 sensor driver"
+	depends on (I2C || SPI)
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	select ST_UVIS25_I2C if (I2C)
+	select ST_UVIS25_SPI if (SPI_MASTER)
+	help
+	  Say yes here to build support for STMicroelectronics UVIS25
+	  uv sensor
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called st_uvis25.
+
+config ST_UVIS25_I2C
+	tristate
+	depends on ST_UVIS25
+	select REGMAP_I2C
+
+config ST_UVIS25_SPI
+	tristate
+	depends on ST_UVIS25
+	select REGMAP_SPI
+
 config TCS3414
 config TCS3414
 	tristate "TAOS TCS3414 digital color sensor"
 	tristate "TAOS TCS3414 digital color sensor"
 	depends on I2C
 	depends on I2C
@@ -425,4 +449,14 @@ config VL6180
 	 To compile this driver as a module, choose M here: the
 	 To compile this driver as a module, choose M here: the
 	 module will be called vl6180.
 	 module will be called vl6180.
 
 
+config ZOPT2201
+	tristate "ZOPT2201 ALS and UV B sensor"
+	depends on I2C
+	help
+	 Say Y here if you want to build a driver for the IDT
+	 ZOPT2201 ambient light and UV B sensor.
+
+	 To compile this driver as a module, choose M here: the
+	 module will be called zopt2201.
+
 endmenu
 endmenu

+ 4 - 0
drivers/iio/light/Makefile

@@ -33,6 +33,9 @@ obj-$(CONFIG_RPR0521)		+= rpr0521.o
 obj-$(CONFIG_SENSORS_TSL2563)	+= tsl2563.o
 obj-$(CONFIG_SENSORS_TSL2563)	+= tsl2563.o
 obj-$(CONFIG_SI1145)		+= si1145.o
 obj-$(CONFIG_SI1145)		+= si1145.o
 obj-$(CONFIG_STK3310)          += stk3310.o
 obj-$(CONFIG_STK3310)          += stk3310.o
+obj-$(CONFIG_ST_UVIS25)		+= st_uvis25_core.o
+obj-$(CONFIG_ST_UVIS25_I2C)	+= st_uvis25_i2c.o
+obj-$(CONFIG_ST_UVIS25_SPI)	+= st_uvis25_spi.o
 obj-$(CONFIG_TCS3414)		+= tcs3414.o
 obj-$(CONFIG_TCS3414)		+= tcs3414.o
 obj-$(CONFIG_TCS3472)		+= tcs3472.o
 obj-$(CONFIG_TCS3472)		+= tcs3472.o
 obj-$(CONFIG_TSL2583)		+= tsl2583.o
 obj-$(CONFIG_TSL2583)		+= tsl2583.o
@@ -41,3 +44,4 @@ obj-$(CONFIG_US5182D)		+= us5182d.o
 obj-$(CONFIG_VCNL4000)		+= vcnl4000.o
 obj-$(CONFIG_VCNL4000)		+= vcnl4000.o
 obj-$(CONFIG_VEML6070)		+= veml6070.o
 obj-$(CONFIG_VEML6070)		+= veml6070.o
 obj-$(CONFIG_VL6180)		+= vl6180.o
 obj-$(CONFIG_VL6180)		+= vl6180.o
+obj-$(CONFIG_ZOPT2201)		+= zopt2201.o

+ 0 - 2
drivers/iio/light/cros_ec_light_prox.c

@@ -181,7 +181,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
 {
 {
 	struct device *dev = &pdev->dev;
 	struct device *dev = &pdev->dev;
 	struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
 	struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
-	struct cros_ec_device *ec_device;
 	struct iio_dev *indio_dev;
 	struct iio_dev *indio_dev;
 	struct cros_ec_light_prox_state *state;
 	struct cros_ec_light_prox_state *state;
 	struct iio_chan_spec *channel;
 	struct iio_chan_spec *channel;
@@ -191,7 +190,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
 		dev_warn(dev, "No CROS EC device found.\n");
 		dev_warn(dev, "No CROS EC device found.\n");
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
-	ec_device = ec_dev->ec_dev;
 
 
 	indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
 	indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
 	if (!indio_dev)
 	if (!indio_dev)

+ 37 - 0
drivers/iio/light/st_uvis25.h

@@ -0,0 +1,37 @@
+/*
+ * STMicroelectronics uvis25 sensor driver
+ *
+ * Copyright 2017 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_UVIS25_H
+#define ST_UVIS25_H
+
+#define ST_UVIS25_DEV_NAME		"uvis25"
+
+#include <linux/iio/iio.h>
+
+/**
+ * struct st_uvis25_hw - ST UVIS25 sensor instance
+ * @regmap: Register map of the device.
+ * @trig: The trigger in use by the driver.
+ * @enabled: Status of the sensor (false->off, true->on).
+ * @irq: Device interrupt line (I2C or SPI).
+ */
+struct st_uvis25_hw {
+	struct regmap *regmap;
+
+	struct iio_trigger *trig;
+	bool enabled;
+	int irq;
+};
+
+extern const struct dev_pm_ops st_uvis25_pm_ops;
+
+int st_uvis25_probe(struct device *dev, int irq, struct regmap *regmap);
+
+#endif /* ST_UVIS25_H */

+ 359 - 0
drivers/iio/light/st_uvis25_core.c

@@ -0,0 +1,359 @@
+/*
+ * STMicroelectronics uvis25 sensor driver
+ *
+ * Copyright 2017 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/buffer.h>
+#include <linux/regmap.h>
+
+#include "st_uvis25.h"
+
+#define ST_UVIS25_REG_WHOAMI_ADDR	0x0f
+#define ST_UVIS25_REG_WHOAMI_VAL	0xca
+#define ST_UVIS25_REG_CTRL1_ADDR	0x20
+#define ST_UVIS25_REG_ODR_MASK		BIT(0)
+#define ST_UVIS25_REG_BDU_MASK		BIT(1)
+#define ST_UVIS25_REG_CTRL2_ADDR	0x21
+#define ST_UVIS25_REG_BOOT_MASK		BIT(7)
+#define ST_UVIS25_REG_CTRL3_ADDR	0x22
+#define ST_UVIS25_REG_HL_MASK		BIT(7)
+#define ST_UVIS25_REG_STATUS_ADDR	0x27
+#define ST_UVIS25_REG_UV_DA_MASK	BIT(0)
+#define ST_UVIS25_REG_OUT_ADDR		0x28
+
+static const struct iio_chan_spec st_uvis25_channels[] = {
+	{
+		.type = IIO_UVINDEX,
+		.address = ST_UVIS25_REG_OUT_ADDR,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 8,
+			.storagebits = 8,
+		},
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static int st_uvis25_check_whoami(struct st_uvis25_hw *hw)
+{
+	int err, data;
+
+	err = regmap_read(hw->regmap, ST_UVIS25_REG_WHOAMI_ADDR, &data);
+	if (err < 0) {
+		dev_err(regmap_get_device(hw->regmap),
+			"failed to read whoami register\n");
+		return err;
+	}
+
+	if (data != ST_UVIS25_REG_WHOAMI_VAL) {
+		dev_err(regmap_get_device(hw->regmap),
+			"wrong whoami {%02x vs %02x}\n",
+			data, ST_UVIS25_REG_WHOAMI_VAL);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int st_uvis25_set_enable(struct st_uvis25_hw *hw, bool enable)
+{
+	int err;
+
+	err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+				 ST_UVIS25_REG_ODR_MASK, enable);
+	if (err < 0)
+		return err;
+
+	hw->enabled = enable;
+
+	return 0;
+}
+
+static int st_uvis25_read_oneshot(struct st_uvis25_hw *hw, u8 addr, int *val)
+{
+	int err;
+
+	err = st_uvis25_set_enable(hw, true);
+	if (err < 0)
+		return err;
+
+	msleep(1500);
+
+	/*
+	 * in order to avoid possible race conditions with interrupt
+	 * generation, disable the sensor first and then poll output
+	 * register. That sequence guarantees the interrupt will be reset
+	 * when irq line is unmasked
+	 */
+	err = st_uvis25_set_enable(hw, false);
+	if (err < 0)
+		return err;
+
+	err = regmap_read(hw->regmap, addr, val);
+
+	return err < 0 ? err : IIO_VAL_INT;
+}
+
+static int st_uvis25_read_raw(struct iio_dev *iio_dev,
+			      struct iio_chan_spec const *ch,
+			      int *val, int *val2, long mask)
+{
+	int ret;
+
+	ret = iio_device_claim_direct_mode(iio_dev);
+	if (ret)
+		return ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED: {
+		struct st_uvis25_hw *hw = iio_priv(iio_dev);
+
+		/*
+		 * mask irq line during oneshot read since the sensor
+		 * does not export the capability to disable data-ready line
+		 * in the register map and it is enabled by default.
+		 * If the line is unmasked during read_raw() it will be set
+		 * active and never reset since the trigger is disabled
+		 */
+		if (hw->irq > 0)
+			disable_irq(hw->irq);
+		ret = st_uvis25_read_oneshot(hw, ch->address, val);
+		if (hw->irq > 0)
+			enable_irq(hw->irq);
+		break;
+	}
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	iio_device_release_direct_mode(iio_dev);
+
+	return ret;
+}
+
+static irqreturn_t st_uvis25_trigger_handler_thread(int irq, void *private)
+{
+	struct st_uvis25_hw *hw = private;
+	int err, status;
+
+	err = regmap_read(hw->regmap, ST_UVIS25_REG_STATUS_ADDR, &status);
+	if (err < 0)
+		return IRQ_HANDLED;
+
+	if (!(status & ST_UVIS25_REG_UV_DA_MASK))
+		return IRQ_NONE;
+
+	iio_trigger_poll_chained(hw->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int st_uvis25_allocate_trigger(struct iio_dev *iio_dev)
+{
+	struct st_uvis25_hw *hw = iio_priv(iio_dev);
+	struct device *dev = regmap_get_device(hw->regmap);
+	bool irq_active_low = false;
+	unsigned long irq_type;
+	int err;
+
+	irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+	switch (irq_type) {
+	case IRQF_TRIGGER_HIGH:
+	case IRQF_TRIGGER_RISING:
+		break;
+	case IRQF_TRIGGER_LOW:
+	case IRQF_TRIGGER_FALLING:
+		irq_active_low = true;
+		break;
+	default:
+		dev_info(dev, "mode %lx unsupported\n", irq_type);
+		return -EINVAL;
+	}
+
+	err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL3_ADDR,
+				 ST_UVIS25_REG_HL_MASK, irq_active_low);
+	if (err < 0)
+		return err;
+
+	err = devm_request_threaded_irq(dev, hw->irq, NULL,
+					st_uvis25_trigger_handler_thread,
+					irq_type | IRQF_ONESHOT,
+					iio_dev->name, hw);
+	if (err) {
+		dev_err(dev, "failed to request trigger irq %d\n",
+			hw->irq);
+		return err;
+	}
+
+	hw->trig = devm_iio_trigger_alloc(dev, "%s-trigger",
+					  iio_dev->name);
+	if (!hw->trig)
+		return -ENOMEM;
+
+	iio_trigger_set_drvdata(hw->trig, iio_dev);
+	hw->trig->dev.parent = dev;
+
+	return devm_iio_trigger_register(dev, hw->trig);
+}
+
+static int st_uvis25_buffer_preenable(struct iio_dev *iio_dev)
+{
+	return st_uvis25_set_enable(iio_priv(iio_dev), true);
+}
+
+static int st_uvis25_buffer_postdisable(struct iio_dev *iio_dev)
+{
+	return st_uvis25_set_enable(iio_priv(iio_dev), false);
+}
+
+static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = {
+	.preenable = st_uvis25_buffer_preenable,
+	.postenable = iio_triggered_buffer_postenable,
+	.predisable = iio_triggered_buffer_predisable,
+	.postdisable = st_uvis25_buffer_postdisable,
+};
+
+static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
+{
+	u8 buffer[ALIGN(sizeof(u8), sizeof(s64)) + sizeof(s64)];
+	struct iio_poll_func *pf = p;
+	struct iio_dev *iio_dev = pf->indio_dev;
+	struct st_uvis25_hw *hw = iio_priv(iio_dev);
+	int err;
+
+	err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, (int *)buffer);
+	if (err < 0)
+		goto out;
+
+	iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+					   iio_get_time_ns(iio_dev));
+
+out:
+	iio_trigger_notify_done(hw->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int st_uvis25_allocate_buffer(struct iio_dev *iio_dev)
+{
+	struct st_uvis25_hw *hw = iio_priv(iio_dev);
+
+	return devm_iio_triggered_buffer_setup(regmap_get_device(hw->regmap),
+					       iio_dev, NULL,
+					       st_uvis25_buffer_handler_thread,
+					       &st_uvis25_buffer_ops);
+}
+
+static const struct iio_info st_uvis25_info = {
+	.read_raw = st_uvis25_read_raw,
+};
+
+static int st_uvis25_init_sensor(struct st_uvis25_hw *hw)
+{
+	int err;
+
+	err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL2_ADDR,
+				 ST_UVIS25_REG_BOOT_MASK, 1);
+	if (err < 0)
+		return err;
+
+	msleep(2000);
+
+	return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+				  ST_UVIS25_REG_BDU_MASK, 1);
+}
+
+int st_uvis25_probe(struct device *dev, int irq, struct regmap *regmap)
+{
+	struct st_uvis25_hw *hw;
+	struct iio_dev *iio_dev;
+	int err;
+
+	iio_dev = devm_iio_device_alloc(dev, sizeof(*hw));
+	if (!iio_dev)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, (void *)iio_dev);
+
+	hw = iio_priv(iio_dev);
+	hw->irq = irq;
+	hw->regmap = regmap;
+
+	err = st_uvis25_check_whoami(hw);
+	if (err < 0)
+		return err;
+
+	iio_dev->modes = INDIO_DIRECT_MODE;
+	iio_dev->dev.parent = dev;
+	iio_dev->channels = st_uvis25_channels;
+	iio_dev->num_channels = ARRAY_SIZE(st_uvis25_channels);
+	iio_dev->name = ST_UVIS25_DEV_NAME;
+	iio_dev->info = &st_uvis25_info;
+
+	err = st_uvis25_init_sensor(hw);
+	if (err < 0)
+		return err;
+
+	if (hw->irq > 0) {
+		err = st_uvis25_allocate_buffer(iio_dev);
+		if (err < 0)
+			return err;
+
+		err = st_uvis25_allocate_trigger(iio_dev);
+		if (err)
+			return err;
+	}
+
+	return devm_iio_device_register(dev, iio_dev);
+}
+EXPORT_SYMBOL(st_uvis25_probe);
+
+static int __maybe_unused st_uvis25_suspend(struct device *dev)
+{
+	struct iio_dev *iio_dev = dev_get_drvdata(dev);
+	struct st_uvis25_hw *hw = iio_priv(iio_dev);
+
+	return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+				  ST_UVIS25_REG_ODR_MASK, 0);
+}
+
+static int __maybe_unused st_uvis25_resume(struct device *dev)
+{
+	struct iio_dev *iio_dev = dev_get_drvdata(dev);
+	struct st_uvis25_hw *hw = iio_priv(iio_dev);
+
+	if (hw->enabled)
+		return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
+					  ST_UVIS25_REG_ODR_MASK, 1);
+
+	return 0;
+}
+
+const struct dev_pm_ops st_uvis25_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(st_uvis25_suspend, st_uvis25_resume)
+};
+EXPORT_SYMBOL(st_uvis25_pm_ops);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics uvis25 sensor driver");
+MODULE_LICENSE("GPL v2");

+ 69 - 0
drivers/iio/light/st_uvis25_i2c.c

@@ -0,0 +1,69 @@
+/*
+ * STMicroelectronics uvis25 i2c driver
+ *
+ * Copyright 2017 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#include "st_uvis25.h"
+
+#define UVIS25_I2C_AUTO_INCREMENT	BIT(7)
+
+static const struct regmap_config st_uvis25_i2c_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.write_flag_mask = UVIS25_I2C_AUTO_INCREMENT,
+	.read_flag_mask = UVIS25_I2C_AUTO_INCREMENT,
+};
+
+static int st_uvis25_i2c_probe(struct i2c_client *client,
+			       const struct i2c_device_id *id)
+{
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_i2c(client, &st_uvis25_i2c_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	return st_uvis25_probe(&client->dev, client->irq, regmap);
+}
+
+static const struct of_device_id st_uvis25_i2c_of_match[] = {
+	{ .compatible = "st,uvis25", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_uvis25_i2c_of_match);
+
+static const struct i2c_device_id st_uvis25_i2c_id_table[] = {
+	{ ST_UVIS25_DEV_NAME },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, st_uvis25_i2c_id_table);
+
+static struct i2c_driver st_uvis25_driver = {
+	.driver = {
+		.name = "st_uvis25_i2c",
+		.pm = &st_uvis25_pm_ops,
+		.of_match_table = of_match_ptr(st_uvis25_i2c_of_match),
+	},
+	.probe = st_uvis25_i2c_probe,
+	.id_table = st_uvis25_i2c_id_table,
+};
+module_i2c_driver(st_uvis25_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics uvis25 i2c driver");
+MODULE_LICENSE("GPL v2");

+ 68 - 0
drivers/iio/light/st_uvis25_spi.c

@@ -0,0 +1,68 @@
+/*
+ * STMicroelectronics uvis25 spi driver
+ *
+ * Copyright 2017 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#include "st_uvis25.h"
+
+#define UVIS25_SENSORS_SPI_READ		BIT(7)
+#define UVIS25_SPI_AUTO_INCREMENT	BIT(6)
+
+static const struct regmap_config st_uvis25_spi_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.read_flag_mask = UVIS25_SENSORS_SPI_READ | UVIS25_SPI_AUTO_INCREMENT,
+	.write_flag_mask = UVIS25_SPI_AUTO_INCREMENT,
+};
+
+static int st_uvis25_spi_probe(struct spi_device *spi)
+{
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_spi(spi, &st_uvis25_spi_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	return st_uvis25_probe(&spi->dev, spi->irq, regmap);
+}
+
+static const struct of_device_id st_uvis25_spi_of_match[] = {
+	{ .compatible = "st,uvis25", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_uvis25_spi_of_match);
+
+static const struct spi_device_id st_uvis25_spi_id_table[] = {
+	{ ST_UVIS25_DEV_NAME },
+	{},
+};
+MODULE_DEVICE_TABLE(spi, st_uvis25_spi_id_table);
+
+static struct spi_driver st_uvis25_driver = {
+	.driver = {
+		.name = "st_uvis25_spi",
+		.pm = &st_uvis25_pm_ops,
+		.of_match_table = of_match_ptr(st_uvis25_spi_of_match),
+	},
+	.probe = st_uvis25_spi_probe,
+	.id_table = st_uvis25_spi_id_table,
+};
+module_spi_driver(st_uvis25_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics uvis25 spi driver");
+MODULE_LICENSE("GPL v2");

+ 568 - 0
drivers/iio/light/zopt2201.c

@@ -0,0 +1,568 @@
+/*
+ * zopt2201.c - Support for IDT ZOPT2201 ambient light and UV B sensor
+ *
+ * Copyright 2017 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License.  See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: https://www.idt.com/document/dst/zopt2201-datasheet
+ * 7-bit I2C slave addresses 0x53 (default) or 0x52 (programmed)
+ *
+ * TODO: interrupt support, ALS/UVB raw mode
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define ZOPT2201_DRV_NAME "zopt2201"
+
+/* Registers */
+#define ZOPT2201_MAIN_CTRL		0x00
+#define ZOPT2201_LS_MEAS_RATE		0x04
+#define ZOPT2201_LS_GAIN		0x05
+#define ZOPT2201_PART_ID		0x06
+#define ZOPT2201_MAIN_STATUS		0x07
+#define ZOPT2201_ALS_DATA		0x0d /* LSB first, 13 to 20 bits */
+#define ZOPT2201_UVB_DATA		0x10 /* LSB first, 13 to 20 bits */
+#define ZOPT2201_UV_COMP_DATA		0x13 /* LSB first, 13 to 20 bits */
+#define ZOPT2201_COMP_DATA		0x16 /* LSB first, 13 to 20 bits */
+#define ZOPT2201_INT_CFG		0x19
+#define ZOPT2201_INT_PST		0x1a
+
+#define ZOPT2201_MAIN_CTRL_LS_MODE	BIT(3) /* 0 .. ALS, 1 .. UV B */
+#define ZOPT2201_MAIN_CTRL_LS_EN	BIT(1)
+
+/* Values for ZOPT2201_LS_MEAS_RATE resolution / bit width */
+#define ZOPT2201_MEAS_RES_20BIT		0 /* takes 400 ms */
+#define ZOPT2201_MEAS_RES_19BIT		1 /* takes 200 ms */
+#define ZOPT2201_MEAS_RES_18BIT		2 /* takes 100 ms, default */
+#define ZOPT2201_MEAS_RES_17BIT		3 /* takes 50 ms */
+#define ZOPT2201_MEAS_RES_16BIT		4 /* takes 25 ms */
+#define ZOPT2201_MEAS_RES_13BIT		5 /* takes 3.125 ms */
+#define ZOPT2201_MEAS_RES_SHIFT		4
+
+/* Values for ZOPT2201_LS_MEAS_RATE measurement rate */
+#define ZOPT2201_MEAS_FREQ_25MS		0
+#define ZOPT2201_MEAS_FREQ_50MS		1
+#define ZOPT2201_MEAS_FREQ_100MS	2 /* default */
+#define ZOPT2201_MEAS_FREQ_200MS	3
+#define ZOPT2201_MEAS_FREQ_500MS	4
+#define ZOPT2201_MEAS_FREQ_1000MS	5
+#define ZOPT2201_MEAS_FREQ_2000MS	6
+
+/* Values for ZOPT2201_LS_GAIN */
+#define ZOPT2201_LS_GAIN_1		0
+#define ZOPT2201_LS_GAIN_3		1
+#define ZOPT2201_LS_GAIN_6		2
+#define ZOPT2201_LS_GAIN_9		3
+#define ZOPT2201_LS_GAIN_18		4
+
+/* Values for ZOPT2201_MAIN_STATUS */
+#define ZOPT2201_MAIN_STATUS_POWERON	BIT(5)
+#define ZOPT2201_MAIN_STATUS_INT	BIT(4)
+#define ZOPT2201_MAIN_STATUS_DRDY	BIT(3)
+
+#define ZOPT2201_PART_NUMBER		0xb2
+
+struct zopt2201_data {
+	struct i2c_client *client;
+	struct mutex lock;
+	u8 gain;
+	u8 res;
+	u8 rate;
+};
+
+static const struct {
+	unsigned int gain; /* gain factor */
+	unsigned int scale; /* micro lux per count */
+} zopt2201_gain_als[] = {
+	{  1, 19200000 },
+	{  3,  6400000 },
+	{  6,  3200000 },
+	{  9,  2133333 },
+	{ 18,  1066666 },
+};
+
+static const struct {
+	unsigned int gain; /* gain factor */
+	unsigned int scale; /* micro W/m2 per count */
+} zopt2201_gain_uvb[] = {
+	{  1, 460800 },
+	{  3, 153600 },
+	{  6,  76800 },
+	{  9,  51200 },
+	{ 18,  25600 },
+};
+
+static const struct {
+	unsigned int bits; /* sensor resolution in bits */
+	unsigned long us; /* measurement time in micro seconds */
+} zopt2201_resolution[] = {
+	{ 20, 400000 },
+	{ 19, 200000 },
+	{ 18, 100000 },
+	{ 17,  50000 },
+	{ 16,  25000 },
+	{ 13,   3125 },
+};
+
+static const struct {
+	unsigned int scale, uscale; /* scale factor as integer + micro */
+	u8 gain; /* gain register value */
+	u8 res; /* resolution register value */
+} zopt2201_scale_als[] = {
+	{ 19, 200000, 0, 5 },
+	{  6, 400000, 1, 5 },
+	{  3, 200000, 2, 5 },
+	{  2, 400000, 0, 4 },
+	{  2, 133333, 3, 5 },
+	{  1, 200000, 0, 3 },
+	{  1,  66666, 4, 5 },
+	{  0, 800000, 1, 4 },
+	{  0, 600000, 0, 2 },
+	{  0, 400000, 2, 4 },
+	{  0, 300000, 0, 1 },
+	{  0, 266666, 3, 4 },
+	{  0, 200000, 2, 3 },
+	{  0, 150000, 0, 0 },
+	{  0, 133333, 4, 4 },
+	{  0, 100000, 2, 2 },
+	{  0,  66666, 4, 3 },
+	{  0,  50000, 2, 1 },
+	{  0,  33333, 4, 2 },
+	{  0,  25000, 2, 0 },
+	{  0,  16666, 4, 1 },
+	{  0,   8333, 4, 0 },
+};
+
+static const struct {
+	unsigned int scale, uscale; /* scale factor as integer + micro */
+	u8 gain; /* gain register value */
+	u8 res; /* resolution register value */
+} zopt2201_scale_uvb[] = {
+	{ 0, 460800, 0, 5 },
+	{ 0, 153600, 1, 5 },
+	{ 0,  76800, 2, 5 },
+	{ 0,  57600, 0, 4 },
+	{ 0,  51200, 3, 5 },
+	{ 0,  28800, 0, 3 },
+	{ 0,  25600, 4, 5 },
+	{ 0,  19200, 1, 4 },
+	{ 0,  14400, 0, 2 },
+	{ 0,   9600, 2, 4 },
+	{ 0,   7200, 0, 1 },
+	{ 0,   6400, 3, 4 },
+	{ 0,   4800, 2, 3 },
+	{ 0,   3600, 0, 0 },
+	{ 0,   3200, 4, 4 },
+	{ 0,   2400, 2, 2 },
+	{ 0,   1600, 4, 3 },
+	{ 0,   1200, 2, 1 },
+	{ 0,    800, 4, 2 },
+	{ 0,    600, 2, 0 },
+	{ 0,    400, 4, 1 },
+	{ 0,    200, 4, 0 },
+};
+
+static int zopt2201_enable_mode(struct zopt2201_data *data, bool uvb_mode)
+{
+	u8 out = ZOPT2201_MAIN_CTRL_LS_EN;
+
+	if (uvb_mode)
+		out |= ZOPT2201_MAIN_CTRL_LS_MODE;
+
+	return i2c_smbus_write_byte_data(data->client, ZOPT2201_MAIN_CTRL, out);
+}
+
+static int zopt2201_read(struct zopt2201_data *data, u8 reg)
+{
+	struct i2c_client *client = data->client;
+	int tries = 10;
+	u8 buf[3];
+	int ret;
+
+	mutex_lock(&data->lock);
+	ret = zopt2201_enable_mode(data, reg == ZOPT2201_UVB_DATA);
+	if (ret < 0)
+		goto fail;
+
+	while (tries--) {
+		unsigned long t = zopt2201_resolution[data->res].us;
+
+		if (t <= 20000)
+			usleep_range(t, t + 1000);
+		else
+			msleep(t / 1000);
+		ret = i2c_smbus_read_byte_data(client, ZOPT2201_MAIN_STATUS);
+		if (ret < 0)
+			goto fail;
+		if (ret & ZOPT2201_MAIN_STATUS_DRDY)
+			break;
+	}
+
+	if (tries < 0) {
+		ret = -ETIMEDOUT;
+		goto fail;
+	}
+
+	ret = i2c_smbus_read_i2c_block_data(client, reg, sizeof(buf), buf);
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_smbus_write_byte_data(client, ZOPT2201_MAIN_CTRL, 0x00);
+	if (ret < 0)
+		goto fail;
+	mutex_unlock(&data->lock);
+
+	return (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+fail:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static const struct iio_chan_spec zopt2201_channels[] = {
+	{
+		.type = IIO_LIGHT,
+		.address = ZOPT2201_ALS_DATA,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+	},
+	{
+		.type = IIO_INTENSITY,
+		.modified = 1,
+		.channel2 = IIO_MOD_LIGHT_UV,
+		.address = ZOPT2201_UVB_DATA,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+	},
+	{
+		.type = IIO_UVINDEX,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+	},
+};
+
+static int zopt2201_read_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int *val, int *val2, long mask)
+{
+	struct zopt2201_data *data = iio_priv(indio_dev);
+	u64 tmp;
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = zopt2201_read(data, chan->address);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_PROCESSED:
+		ret = zopt2201_read(data, ZOPT2201_UVB_DATA);
+		if (ret < 0)
+			return ret;
+		*val = ret * 18 *
+			(1 << (20 - zopt2201_resolution[data->res].bits)) /
+			zopt2201_gain_uvb[data->gain].gain;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->address) {
+		case ZOPT2201_ALS_DATA:
+			*val = zopt2201_gain_als[data->gain].scale;
+			break;
+		case ZOPT2201_UVB_DATA:
+			*val = zopt2201_gain_uvb[data->gain].scale;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		*val2 = 1000000;
+		*val2 *= (1 << (zopt2201_resolution[data->res].bits - 13));
+		tmp = div_s64(*val * 1000000ULL, *val2);
+		*val = div_s64_rem(tmp, 1000000, val2);
+
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_INT_TIME:
+		*val = 0;
+		*val2 = zopt2201_resolution[data->res].us;
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int zopt2201_set_resolution(struct zopt2201_data *data, u8 res)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(data->client, ZOPT2201_LS_MEAS_RATE,
+					(res << ZOPT2201_MEAS_RES_SHIFT) |
+					data->rate);
+	if (ret < 0)
+		return ret;
+
+	data->res = res;
+
+	return 0;
+}
+
+static int zopt2201_write_resolution(struct zopt2201_data *data,
+				     int val, int val2)
+{
+	int i, ret;
+
+	if (val != 0)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(zopt2201_resolution); i++)
+		if (val2 == zopt2201_resolution[i].us) {
+			mutex_lock(&data->lock);
+			ret = zopt2201_set_resolution(data, i);
+			mutex_unlock(&data->lock);
+			return ret;
+		}
+
+	return -EINVAL;
+}
+
+static int zopt2201_set_gain(struct zopt2201_data *data, u8 gain)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(data->client, ZOPT2201_LS_GAIN, gain);
+	if (ret < 0)
+		return ret;
+
+	data->gain = gain;
+
+	return 0;
+}
+
+static int zopt2201_write_scale_als_by_idx(struct zopt2201_data *data, int idx)
+{
+	int ret;
+
+	mutex_lock(&data->lock);
+	ret = zopt2201_set_resolution(data, zopt2201_scale_als[idx].res);
+	if (ret < 0)
+		goto unlock;
+
+	ret = zopt2201_set_gain(data, zopt2201_scale_als[idx].gain);
+
+unlock:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static int zopt2201_write_scale_als(struct zopt2201_data *data,
+				     int val, int val2)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(zopt2201_scale_als); i++)
+		if (val == zopt2201_scale_als[i].scale &&
+		    val2 == zopt2201_scale_als[i].uscale) {
+			return zopt2201_write_scale_als_by_idx(data, i);
+		}
+
+	return -EINVAL;
+}
+
+static int zopt2201_write_scale_uvb_by_idx(struct zopt2201_data *data, int idx)
+{
+	int ret;
+
+	mutex_lock(&data->lock);
+	ret = zopt2201_set_resolution(data, zopt2201_scale_als[idx].res);
+	if (ret < 0)
+		goto unlock;
+
+	ret = zopt2201_set_gain(data, zopt2201_scale_als[idx].gain);
+
+unlock:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static int zopt2201_write_scale_uvb(struct zopt2201_data *data,
+				     int val, int val2)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(zopt2201_scale_uvb); i++)
+		if (val == zopt2201_scale_uvb[i].scale &&
+		    val2 == zopt2201_scale_uvb[i].uscale)
+			return zopt2201_write_scale_uvb_by_idx(data, i);
+
+	return -EINVAL;
+}
+
+static int zopt2201_write_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int val, int val2, long mask)
+{
+	struct zopt2201_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_INT_TIME:
+		return zopt2201_write_resolution(data, val, val2);
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->address) {
+		case ZOPT2201_ALS_DATA:
+			return zopt2201_write_scale_als(data, val, val2);
+		case ZOPT2201_UVB_DATA:
+			return zopt2201_write_scale_uvb(data, val, val2);
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static ssize_t zopt2201_show_int_time_available(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	size_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(zopt2201_resolution); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06lu ",
+				 zopt2201_resolution[i].us);
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static IIO_DEV_ATTR_INT_TIME_AVAIL(zopt2201_show_int_time_available);
+
+static ssize_t zopt2201_show_als_scale_avail(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(zopt2201_scale_als); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ",
+				 zopt2201_scale_als[i].scale,
+				 zopt2201_scale_als[i].uscale);
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static ssize_t zopt2201_show_uvb_scale_avail(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(zopt2201_scale_uvb); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ",
+				 zopt2201_scale_uvb[i].scale,
+				 zopt2201_scale_uvb[i].uscale);
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(in_illuminance_scale_available, 0444,
+		       zopt2201_show_als_scale_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_intensity_uv_scale_available, 0444,
+		       zopt2201_show_uvb_scale_avail, NULL, 0);
+
+static struct attribute *zopt2201_attributes[] = {
+	&iio_dev_attr_integration_time_available.dev_attr.attr,
+	&iio_dev_attr_in_illuminance_scale_available.dev_attr.attr,
+	&iio_dev_attr_in_intensity_uv_scale_available.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group zopt2201_attribute_group = {
+	.attrs = zopt2201_attributes,
+};
+
+static const struct iio_info zopt2201_info = {
+	.read_raw = zopt2201_read_raw,
+	.write_raw = zopt2201_write_raw,
+	.attrs = &zopt2201_attribute_group,
+};
+
+static int zopt2201_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	struct zopt2201_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+		return -EOPNOTSUPP;
+
+	ret = i2c_smbus_read_byte_data(client, ZOPT2201_PART_ID);
+	if (ret < 0)
+		return ret;
+	if (ret != ZOPT2201_PART_NUMBER)
+		return -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+	mutex_init(&data->lock);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &zopt2201_info;
+	indio_dev->channels = zopt2201_channels;
+	indio_dev->num_channels = ARRAY_SIZE(zopt2201_channels);
+	indio_dev->name = ZOPT2201_DRV_NAME;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	data->rate = ZOPT2201_MEAS_FREQ_100MS;
+	ret = zopt2201_set_resolution(data, ZOPT2201_MEAS_RES_18BIT);
+	if (ret < 0)
+		return ret;
+
+	ret = zopt2201_set_gain(data, ZOPT2201_LS_GAIN_3);
+	if (ret < 0)
+		return ret;
+
+	return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id zopt2201_id[] = {
+	{ "zopt2201", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, zopt2201_id);
+
+static struct i2c_driver zopt2201_driver = {
+	.driver = {
+		.name   = ZOPT2201_DRV_NAME,
+	},
+	.probe  = zopt2201_probe,
+	.id_table = zopt2201_id,
+};
+
+module_i2c_driver(zopt2201_driver);
+
+MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("IDT ZOPT2201 ambient light and UV B sensor driver");
+MODULE_LICENSE("GPL");

+ 1 - 0
drivers/iio/magnetometer/ak8975.c

@@ -788,6 +788,7 @@ static const struct acpi_device_id ak_acpi_match[] = {
 	{"AK8975", AK8975},
 	{"AK8975", AK8975},
 	{"AK8963", AK8963},
 	{"AK8963", AK8963},
 	{"INVN6500", AK8963},
 	{"INVN6500", AK8963},
+	{"AK009911", AK09911},
 	{"AK09911", AK09911},
 	{"AK09911", AK09911},
 	{"AK09912", AK09912},
 	{"AK09912", AK09912},
 	{ },
 	{ },

+ 132 - 71
drivers/iio/pressure/bmp280-core.c

@@ -55,6 +55,28 @@ struct bmp180_calib {
 	s16 MD;
 	s16 MD;
 };
 };
 
 
+/* See datasheet Section 4.2.2. */
+struct bmp280_calib {
+	u16 T1;
+	s16 T2;
+	s16 T3;
+	u16 P1;
+	s16 P2;
+	s16 P3;
+	s16 P4;
+	s16 P5;
+	s16 P6;
+	s16 P7;
+	s16 P8;
+	s16 P9;
+	u8  H1;
+	s16 H2;
+	u8  H3;
+	s16 H4;
+	s16 H5;
+	s8  H6;
+};
+
 struct bmp280_data {
 struct bmp280_data {
 	struct device *dev;
 	struct device *dev;
 	struct mutex lock;
 	struct mutex lock;
@@ -62,7 +84,10 @@ struct bmp280_data {
 	struct completion done;
 	struct completion done;
 	bool use_eoc;
 	bool use_eoc;
 	const struct bmp280_chip_info *chip_info;
 	const struct bmp280_chip_info *chip_info;
-	struct bmp180_calib calib;
+	union {
+		struct bmp180_calib bmp180;
+		struct bmp280_calib bmp280;
+	} calib;
 	struct regulator *vddd;
 	struct regulator *vddd;
 	struct regulator *vdda;
 	struct regulator *vdda;
 	unsigned int start_up_time; /* in microseconds */
 	unsigned int start_up_time; /* in microseconds */
@@ -120,67 +145,121 @@ static const struct iio_chan_spec bmp280_channels[] = {
 	},
 	},
 };
 };
 
 
-/*
- * Returns humidity in percent, resolution is 0.01 percent. Output value of
- * "47445" represents 47445/1024 = 46.333 %RH.
- *
- * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
- */
-
-static u32 bmp280_compensate_humidity(struct bmp280_data *data,
-				      s32 adc_humidity)
+static int bmp280_read_calib(struct bmp280_data *data,
+			     struct bmp280_calib *calib,
+			     unsigned int chip)
 {
 {
+	int ret;
+	unsigned int tmp;
 	struct device *dev = data->dev;
 	struct device *dev = data->dev;
-	unsigned int H1, H3, tmp;
-	int H2, H4, H5, H6, ret, var;
+	__le16 t_buf[BMP280_COMP_TEMP_REG_COUNT / 2];
+	__le16 p_buf[BMP280_COMP_PRESS_REG_COUNT / 2];
+
+	/* Read temperature calibration values. */
+	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+			       t_buf, BMP280_COMP_TEMP_REG_COUNT);
+	if (ret < 0) {
+		dev_err(data->dev,
+			"failed to read temperature calibration parameters\n");
+		return ret;
+	}
+
+	calib->T1 = le16_to_cpu(t_buf[T1]);
+	calib->T2 = le16_to_cpu(t_buf[T2]);
+	calib->T3 = le16_to_cpu(t_buf[T3]);
 
 
-	ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &H1);
+	/* Read pressure calibration values. */
+	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
+			       p_buf, BMP280_COMP_PRESS_REG_COUNT);
+	if (ret < 0) {
+		dev_err(data->dev,
+			"failed to read pressure calibration parameters\n");
+		return ret;
+	}
+
+	calib->P1 = le16_to_cpu(p_buf[P1]);
+	calib->P2 = le16_to_cpu(p_buf[P2]);
+	calib->P3 = le16_to_cpu(p_buf[P3]);
+	calib->P4 = le16_to_cpu(p_buf[P4]);
+	calib->P5 = le16_to_cpu(p_buf[P5]);
+	calib->P6 = le16_to_cpu(p_buf[P6]);
+	calib->P7 = le16_to_cpu(p_buf[P7]);
+	calib->P8 = le16_to_cpu(p_buf[P8]);
+	calib->P9 = le16_to_cpu(p_buf[P9]);
+
+	/*
+	 * Read humidity calibration values.
+	 * Due to some odd register addressing we cannot just
+	 * do a big bulk read. Instead, we have to read each Hx
+	 * value separately and sometimes do some bit shifting...
+	 * Humidity data is only available on BME280.
+	 */
+	if (chip != BME280_CHIP_ID)
+		return 0;
+
+	ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &tmp);
 	if (ret < 0) {
 	if (ret < 0) {
 		dev_err(dev, "failed to read H1 comp value\n");
 		dev_err(dev, "failed to read H1 comp value\n");
 		return ret;
 		return ret;
 	}
 	}
+	calib->H1 = tmp;
 
 
 	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
 	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
 	if (ret < 0) {
 	if (ret < 0) {
 		dev_err(dev, "failed to read H2 comp value\n");
 		dev_err(dev, "failed to read H2 comp value\n");
 		return ret;
 		return ret;
 	}
 	}
-	H2 = sign_extend32(le16_to_cpu(tmp), 15);
+	calib->H2 = sign_extend32(le16_to_cpu(tmp), 15);
 
 
-	ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &H3);
+	ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
 	if (ret < 0) {
 	if (ret < 0) {
 		dev_err(dev, "failed to read H3 comp value\n");
 		dev_err(dev, "failed to read H3 comp value\n");
 		return ret;
 		return ret;
 	}
 	}
+	calib->H3 = tmp;
 
 
 	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
 	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
 	if (ret < 0) {
 	if (ret < 0) {
 		dev_err(dev, "failed to read H4 comp value\n");
 		dev_err(dev, "failed to read H4 comp value\n");
 		return ret;
 		return ret;
 	}
 	}
-	H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
-			  (be16_to_cpu(tmp) & 0xf), 11);
+	calib->H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
+				  (be16_to_cpu(tmp) & 0xf), 11);
 
 
 	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
 	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
 	if (ret < 0) {
 	if (ret < 0) {
 		dev_err(dev, "failed to read H5 comp value\n");
 		dev_err(dev, "failed to read H5 comp value\n");
 		return ret;
 		return ret;
 	}
 	}
-	H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
+	calib->H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
 
 
 	ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
 	ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
 	if (ret < 0) {
 	if (ret < 0) {
 		dev_err(dev, "failed to read H6 comp value\n");
 		dev_err(dev, "failed to read H6 comp value\n");
 		return ret;
 		return ret;
 	}
 	}
-	H6 = sign_extend32(tmp, 7);
+	calib->H6 = sign_extend32(tmp, 7);
+
+	return 0;
+}
+/*
+ * Returns humidity in percent, resolution is 0.01 percent. Output value of
+ * "47445" represents 47445/1024 = 46.333 %RH.
+ *
+ * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
+ */
+static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+				      s32 adc_humidity)
+{
+	s32 var;
+	struct bmp280_calib *calib = &data->calib.bmp280;
 
 
 	var = ((s32)data->t_fine) - (s32)76800;
 	var = ((s32)data->t_fine) - (s32)76800;
-	var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var))
-		+ (s32)16384) >> 15) * (((((((var * H6) >> 10)
-		* (((var * (s32)H3) >> 11) + (s32)32768)) >> 10)
-		+ (s32)2097152) * H2 + 8192) >> 14);
-	var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4;
+	var = ((((adc_humidity << 14) - (calib->H4 << 20) - (calib->H5 * var))
+		+ (s32)16384) >> 15) * (((((((var * calib->H6) >> 10)
+		* (((var * (s32)calib->H3) >> 11) + (s32)32768)) >> 10)
+		+ (s32)2097152) * calib->H2 + 8192) >> 14);
+	var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4;
 
 
 	return var >> 12;
 	return var >> 12;
 };
 };
@@ -195,31 +274,14 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
 static s32 bmp280_compensate_temp(struct bmp280_data *data,
 static s32 bmp280_compensate_temp(struct bmp280_data *data,
 				  s32 adc_temp)
 				  s32 adc_temp)
 {
 {
-	int ret;
 	s32 var1, var2;
 	s32 var1, var2;
-	__le16 buf[BMP280_COMP_TEMP_REG_COUNT / 2];
+	struct bmp280_calib *calib = &data->calib.bmp280;
 
 
-	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
-			       buf, BMP280_COMP_TEMP_REG_COUNT);
-	if (ret < 0) {
-		dev_err(data->dev,
-			"failed to read temperature calibration parameters\n");
-		return ret;
-	}
-
-	/*
-	 * The double casts are necessary because le16_to_cpu returns an
-	 * unsigned 16-bit value.  Casting that value directly to a
-	 * signed 32-bit will not do proper sign extension.
-	 *
-	 * Conversely, T1 and P1 are unsigned values, so they can be
-	 * cast straight to the larger type.
-	 */
-	var1 = (((adc_temp >> 3) - ((s32)le16_to_cpu(buf[T1]) << 1)) *
-		((s32)(s16)le16_to_cpu(buf[T2]))) >> 11;
-	var2 = (((((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1]))) *
-		  ((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1])))) >> 12) *
-		((s32)(s16)le16_to_cpu(buf[T3]))) >> 14;
+	var1 = (((adc_temp >> 3) - ((s32)calib->T1 << 1)) *
+		((s32)calib->T2)) >> 11;
+	var2 = (((((adc_temp >> 4) - ((s32)calib->T1)) *
+		  ((adc_temp >> 4) - ((s32)calib->T1))) >> 12) *
+		((s32)calib->T3)) >> 14;
 	data->t_fine = var1 + var2;
 	data->t_fine = var1 + var2;
 
 
 	return (data->t_fine * 5 + 128) >> 8;
 	return (data->t_fine * 5 + 128) >> 8;
@@ -235,34 +297,25 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
 static u32 bmp280_compensate_press(struct bmp280_data *data,
 static u32 bmp280_compensate_press(struct bmp280_data *data,
 				   s32 adc_press)
 				   s32 adc_press)
 {
 {
-	int ret;
 	s64 var1, var2, p;
 	s64 var1, var2, p;
-	__le16 buf[BMP280_COMP_PRESS_REG_COUNT / 2];
-
-	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
-			       buf, BMP280_COMP_PRESS_REG_COUNT);
-	if (ret < 0) {
-		dev_err(data->dev,
-			"failed to read pressure calibration parameters\n");
-		return ret;
-	}
+	struct bmp280_calib *calib = &data->calib.bmp280;
 
 
 	var1 = ((s64)data->t_fine) - 128000;
 	var1 = ((s64)data->t_fine) - 128000;
-	var2 = var1 * var1 * (s64)(s16)le16_to_cpu(buf[P6]);
-	var2 += (var1 * (s64)(s16)le16_to_cpu(buf[P5])) << 17;
-	var2 += ((s64)(s16)le16_to_cpu(buf[P4])) << 35;
-	var1 = ((var1 * var1 * (s64)(s16)le16_to_cpu(buf[P3])) >> 8) +
-		((var1 * (s64)(s16)le16_to_cpu(buf[P2])) << 12);
-	var1 = ((((s64)1) << 47) + var1) * ((s64)le16_to_cpu(buf[P1])) >> 33;
+	var2 = var1 * var1 * (s64)calib->P6;
+	var2 += (var1 * (s64)calib->P5) << 17;
+	var2 += ((s64)calib->P4) << 35;
+	var1 = ((var1 * var1 * (s64)calib->P3) >> 8) +
+		((var1 * (s64)calib->P2) << 12);
+	var1 = ((((s64)1) << 47) + var1) * ((s64)calib->P1) >> 33;
 
 
 	if (var1 == 0)
 	if (var1 == 0)
 		return 0;
 		return 0;
 
 
 	p = ((((s64)1048576 - adc_press) << 31) - var2) * 3125;
 	p = ((((s64)1048576 - adc_press) << 31) - var2) * 3125;
 	p = div64_s64(p, var1);
 	p = div64_s64(p, var1);
-	var1 = (((s64)(s16)le16_to_cpu(buf[P9])) * (p >> 13) * (p >> 13)) >> 25;
-	var2 = (((s64)(s16)le16_to_cpu(buf[P8])) * p) >> 19;
-	p = ((p + var1 + var2) >> 8) + (((s64)(s16)le16_to_cpu(buf[P7])) << 4);
+	var1 = (((s64)calib->P9) * (p >> 13) * (p >> 13)) >> 25;
+	var2 = ((s64)(calib->P8) * p) >> 19;
+	p = ((p + var1 + var2) >> 8) + (((s64)calib->P7) << 4);
 
 
 	return (u32)p;
 	return (u32)p;
 }
 }
@@ -752,7 +805,7 @@ static int bmp180_read_calib(struct bmp280_data *data,
 static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
 static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
 {
 {
 	s32 x1, x2;
 	s32 x1, x2;
-	struct bmp180_calib *calib = &data->calib;
+	struct bmp180_calib *calib = &data->calib.bmp180;
 
 
 	x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
 	x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
 	x2 = (calib->MC << 11) / (x1 + calib->MD);
 	x2 = (calib->MC << 11) / (x1 + calib->MD);
@@ -814,7 +867,7 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
 	s32 b3, b6;
 	s32 b3, b6;
 	u32 b4, b7;
 	u32 b4, b7;
 	s32 oss = data->oversampling_press;
 	s32 oss = data->oversampling_press;
-	struct bmp180_calib *calib = &data->calib;
+	struct bmp180_calib *calib = &data->calib.bmp180;
 
 
 	b6 = data->t_fine - 4000;
 	b6 = data->t_fine - 4000;
 	x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
 	x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
@@ -1028,11 +1081,19 @@ int bmp280_common_probe(struct device *dev,
 	dev_set_drvdata(dev, indio_dev);
 	dev_set_drvdata(dev, indio_dev);
 
 
 	/*
 	/*
-	 * The BMP085 and BMP180 has calibration in an E2PROM, read it out
-	 * at probe time. It will not change.
+	 * Some chips have calibration parameters "programmed into the devices'
+	 * non-volatile memory during production". Let's read them out at probe
+	 * time once. They will not change.
 	 */
 	 */
 	if (chip_id  == BMP180_CHIP_ID) {
 	if (chip_id  == BMP180_CHIP_ID) {
-		ret = bmp180_read_calib(data, &data->calib);
+		ret = bmp180_read_calib(data, &data->calib.bmp180);
+		if (ret < 0) {
+			dev_err(data->dev,
+				"failed to read calibration coefficients\n");
+			goto out_disable_vdda;
+		}
+	} else if (chip_id == BMP280_CHIP_ID || chip_id == BME280_CHIP_ID) {
+		ret = bmp280_read_calib(data, &data->calib.bmp280, chip_id);
 		if (ret < 0) {
 		if (ret < 0) {
 			dev_err(data->dev,
 			dev_err(data->dev,
 				"failed to read calibration coefficients\n");
 				"failed to read calibration coefficients\n");

+ 1 - 0
drivers/iio/proximity/sx9500.c

@@ -1031,6 +1031,7 @@ static const struct dev_pm_ops sx9500_pm_ops = {
 
 
 static const struct acpi_device_id sx9500_acpi_match[] = {
 static const struct acpi_device_id sx9500_acpi_match[] = {
 	{"SSX9500", 0},
 	{"SSX9500", 0},
+	{"SASX9500", 0},
 	{ },
 	{ },
 };
 };
 MODULE_DEVICE_TABLE(acpi, sx9500_acpi_match);
 MODULE_DEVICE_TABLE(acpi, sx9500_acpi_match);

+ 1 - 2
drivers/iio/trigger/stm32-lptimer-trigger.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * STM32 Low-Power Timer Trigger driver
  * STM32 Low-Power Timer Trigger driver
  *
  *
@@ -5,8 +6,6 @@
  *
  *
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
  * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
  *
  *
- * License terms:  GNU General Public License (GPL), version 2
- *
  * Inspired by Benjamin Gaignard's stm32-timer-trigger driver
  * Inspired by Benjamin Gaignard's stm32-timer-trigger driver
  */
  */
 
 

+ 1 - 1
drivers/iio/trigger/stm32-timer-trigger.c

@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * Copyright (C) STMicroelectronics 2016
  * Copyright (C) STMicroelectronics 2016
  *
  *
  * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
  * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
  *
  *
- * License terms:  GNU General Public License (GPL), version 2
  */
  */
 
 
 #include <linux/iio/iio.h>
 #include <linux/iio/iio.h>

+ 4 - 0
drivers/staging/Kconfig

@@ -26,6 +26,10 @@ if STAGING
 
 
 source "drivers/staging/irda/net/Kconfig"
 source "drivers/staging/irda/net/Kconfig"
 
 
+source "drivers/staging/ipx/Kconfig"
+
+source "drivers/staging/ncpfs/Kconfig"
+
 source "drivers/staging/wlan-ng/Kconfig"
 source "drivers/staging/wlan-ng/Kconfig"
 
 
 source "drivers/staging/comedi/Kconfig"
 source "drivers/staging/comedi/Kconfig"

+ 2 - 0
drivers/staging/Makefile

@@ -3,6 +3,8 @@
 
 
 obj-y				+= media/
 obj-y				+= media/
 obj-y				+= typec/
 obj-y				+= typec/
+obj-$(CONFIG_IPX)		+= ipx/
+obj-$(CONFIG_NCP_FS)		+= ncpfs/
 obj-$(CONFIG_IRDA)		+= irda/net/
 obj-$(CONFIG_IRDA)		+= irda/net/
 obj-$(CONFIG_IRDA)		+= irda/drivers/
 obj-$(CONFIG_IRDA)		+= irda/drivers/
 obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/
 obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/

+ 27 - 10
drivers/staging/android/ashmem.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* mm/ashmem.c
 /* mm/ashmem.c
  *
  *
  * Anonymous Shared Memory Subsystem, ashmem
  * Anonymous Shared Memory Subsystem, ashmem
@@ -5,15 +6,6 @@
  * Copyright (C) 2008 Google, Inc.
  * Copyright (C) 2008 Google, Inc.
  *
  *
  * Robert Love <rlove@google.com>
  * Robert Love <rlove@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
  */
 
 
 #define pr_fmt(fmt) "ashmem: " fmt
 #define pr_fmt(fmt) "ashmem: " fmt
@@ -818,7 +810,23 @@ static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
 	return ashmem_ioctl(file, cmd, arg);
 	return ashmem_ioctl(file, cmd, arg);
 }
 }
 #endif
 #endif
+#ifdef CONFIG_PROC_FS
+static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
+{
+	struct ashmem_area *asma = file->private_data;
+
+	mutex_lock(&ashmem_mutex);
+
+	if (asma->file)
+		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);
+
+	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+		seq_printf(m, "name:\t%s\n",
+			   asma->name + ASHMEM_NAME_PREFIX_LEN);
 
 
+	mutex_unlock(&ashmem_mutex);
+}
+#endif
 static const struct file_operations ashmem_fops = {
 static const struct file_operations ashmem_fops = {
 	.owner = THIS_MODULE,
 	.owner = THIS_MODULE,
 	.open = ashmem_open,
 	.open = ashmem_open,
@@ -830,6 +838,9 @@ static const struct file_operations ashmem_fops = {
 #ifdef CONFIG_COMPAT
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = compat_ashmem_ioctl,
 	.compat_ioctl = compat_ashmem_ioctl,
 #endif
 #endif
+#ifdef CONFIG_PROC_FS
+	.show_fdinfo = ashmem_show_fdinfo,
+#endif
 };
 };
 
 
 static struct miscdevice ashmem_misc = {
 static struct miscdevice ashmem_misc = {
@@ -864,12 +875,18 @@ static int __init ashmem_init(void)
 		goto out_free2;
 		goto out_free2;
 	}
 	}
 
 
-	register_shrinker(&ashmem_shrinker);
+	ret = register_shrinker(&ashmem_shrinker);
+	if (ret) {
+		pr_err("failed to register shrinker!\n");
+		goto out_demisc;
+	}
 
 
 	pr_info("initialized\n");
 	pr_info("initialized\n");
 
 
 	return 0;
 	return 0;
 
 
+out_demisc:
+	misc_deregister(&ashmem_misc);
 out_free2:
 out_free2:
 	kmem_cache_destroy(ashmem_range_cachep);
 	kmem_cache_destroy(ashmem_range_cachep);
 out_free1:
 out_free1:

+ 1 - 0
drivers/staging/android/ashmem.h

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR Apache-2.0)
 /*
 /*
  * include/linux/ashmem.h
  * include/linux/ashmem.h
  *
  *

+ 4 - 12
drivers/staging/android/ion/ion-ioctl.c

@@ -1,16 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
- *
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>
@@ -70,8 +60,10 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return -EFAULT;
 		return -EFAULT;
 
 
 	ret = validate_ioctl_arg(cmd, &data);
 	ret = validate_ioctl_arg(cmd, &data);
-	if (WARN_ON_ONCE(ret))
+	if (ret) {
+		pr_warn_once("%s: ioctl validate failed\n", __func__);
 		return ret;
 		return ret;
+	}
 
 
 	if (!(dir & _IOC_WRITE))
 	if (!(dir & _IOC_WRITE))
 		memset(&data, 0, sizeof(data));
 		memset(&data, 0, sizeof(data));

+ 17 - 23
drivers/staging/android/ion/ion.c

@@ -1,42 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
- *
  * drivers/staging/android/ion/ion.c
  * drivers/staging/android/ion/ion.c
  *
  *
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 
 
+#include <linux/anon_inodes.h>
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/device.h>
+#include <linux/dma-buf.h>
 #include <linux/err.h>
 #include <linux/err.h>
+#include <linux/export.h>
 #include <linux/file.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
 #include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/fs.h>
-#include <linux/anon_inodes.h>
+#include <linux/idr.h>
 #include <linux/kthread.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/list.h>
 #include <linux/memblock.h>
 #include <linux/memblock.h>
 #include <linux/miscdevice.h>
 #include <linux/miscdevice.h>
-#include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/mm.h>
 #include <linux/mm_types.h>
 #include <linux/mm_types.h>
 #include <linux/rbtree.h>
 #include <linux/rbtree.h>
-#include <linux/slab.h>
+#include <linux/sched/task.h>
 #include <linux/seq_file.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/dma-buf.h>
-#include <linux/idr.h>
-#include <linux/sched/task.h>
 
 
 #include "ion.h"
 #include "ion.h"
 
 
@@ -539,6 +529,7 @@ void ion_device_add_heap(struct ion_heap *heap)
 {
 {
 	struct dentry *debug_file;
 	struct dentry *debug_file;
 	struct ion_device *dev = internal_dev;
 	struct ion_device *dev = internal_dev;
+	int ret;
 
 
 	if (!heap->ops->allocate || !heap->ops->free)
 	if (!heap->ops->allocate || !heap->ops->free)
 		pr_err("%s: can not add heap with invalid ops struct.\n",
 		pr_err("%s: can not add heap with invalid ops struct.\n",
@@ -550,8 +541,11 @@ void ion_device_add_heap(struct ion_heap *heap)
 	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 		ion_heap_init_deferred_free(heap);
 		ion_heap_init_deferred_free(heap);
 
 
-	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
-		ion_heap_init_shrinker(heap);
+	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
+		ret = ion_heap_init_shrinker(heap);
+		if (ret)
+			pr_err("%s: Failed to register shrinker\n", __func__);
+	}
 
 
 	heap->dev = dev;
 	heap->dev = dev;
 	down_write(&dev->lock);
 	down_write(&dev->lock);
@@ -567,9 +561,9 @@ void ion_device_add_heap(struct ion_heap *heap)
 		char debug_name[64];
 		char debug_name[64];
 
 
 		snprintf(debug_name, 64, "%s_shrink", heap->name);
 		snprintf(debug_name, 64, "%s_shrink", heap->name);
-		debug_file = debugfs_create_file(
-			debug_name, 0644, dev->debug_root, heap,
-			&debug_shrink_fops);
+		debug_file = debugfs_create_file(debug_name,
+						 0644, dev->debug_root, heap,
+						 &debug_shrink_fops);
 		if (!debug_file) {
 		if (!debug_file) {
 			char buf[256], *path;
 			char buf[256], *path;
 
 

+ 4 - 12
drivers/staging/android/ion/ion.h

@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * drivers/staging/android/ion/ion.h
  * drivers/staging/android/ion/ion.h
  *
  *
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 
 
 #ifndef _ION_H
 #ifndef _ION_H
@@ -189,7 +180,8 @@ struct ion_heap {
 	wait_queue_head_t waitqueue;
 	wait_queue_head_t waitqueue;
 	struct task_struct *task;
 	struct task_struct *task;
 
 
-	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+	int (*debug_show)(struct ion_heap *heap, struct seq_file *s,
+			  void *unused);
 };
 };
 
 
 /**
 /**
@@ -238,7 +230,7 @@ int ion_alloc(size_t len,
  * this function will be called to setup a shrinker to shrink the freelists
  * this function will be called to setup a shrinker to shrink the freelists
  * and call the heap's shrink op.
  * and call the heap's shrink op.
  */
  */
-void ion_heap_init_shrinker(struct ion_heap *heap);
+int ion_heap_init_shrinker(struct ion_heap *heap);
 
 
 /**
 /**
  * ion_heap_init_deferred_free -- initialize deferred free functionality
  * ion_heap_init_deferred_free -- initialize deferred free functionality

+ 1 - 10
drivers/staging/android/ion/ion_carveout_heap.c

@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * drivers/staging/android/ion/ion_carveout_heap.c
  * drivers/staging/android/ion/ion_carveout_heap.c
  *
  *
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-mapping.h>

+ 1 - 10
drivers/staging/android/ion/ion_chunk_heap.c

@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * drivers/staging/android/ion/ion_chunk_heap.c
  * drivers/staging/android/ion/ion_chunk_heap.c
  *
  *
  * Copyright (C) 2012 Google, Inc.
  * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 #include <linux/dma-mapping.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/err.h>

+ 1 - 10
drivers/staging/android/ion/ion_cma_heap.c

@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * drivers/staging/android/ion/ion_cma_heap.c
  * drivers/staging/android/ion/ion_cma_heap.c
  *
  *
  * Copyright (C) Linaro 2012
  * Copyright (C) Linaro 2012
  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 
 
 #include <linux/device.h>
 #include <linux/device.h>

+ 4 - 12
drivers/staging/android/ion/ion_heap.c

@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * drivers/staging/android/ion/ion_heap.c
  * drivers/staging/android/ion/ion_heap.c
  *
  *
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 
 
 #include <linux/err.h>
 #include <linux/err.h>
@@ -306,11 +297,12 @@ static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
 	return freed;
 	return freed;
 }
 }
 
 
-void ion_heap_init_shrinker(struct ion_heap *heap)
+int ion_heap_init_shrinker(struct ion_heap *heap)
 {
 {
 	heap->shrinker.count_objects = ion_heap_shrink_count;
 	heap->shrinker.count_objects = ion_heap_shrink_count;
 	heap->shrinker.scan_objects = ion_heap_shrink_scan;
 	heap->shrinker.scan_objects = ion_heap_shrink_scan;
 	heap->shrinker.seeks = DEFAULT_SEEKS;
 	heap->shrinker.seeks = DEFAULT_SEEKS;
 	heap->shrinker.batch = 0;
 	heap->shrinker.batch = 0;
-	register_shrinker(&heap->shrinker);
+
+	return register_shrinker(&heap->shrinker);
 }
 }

+ 1 - 10
drivers/staging/android/ion/ion_page_pool.c

@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * drivers/staging/android/ion/ion_mem_pool.c
  * drivers/staging/android/ion/ion_mem_pool.c
  *
  *
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 
 
 #include <linux/debugfs.h>
 #include <linux/debugfs.h>

+ 2 - 11
drivers/staging/android/ion/ion_system_heap.c

@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * drivers/staging/android/ion/ion_system_heap.c
  * drivers/staging/android/ion/ion_system_heap.c
  *
  *
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 
 
 #include <asm/page.h>
 #include <asm/page.h>
@@ -371,7 +362,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 	unsigned long i;
 	unsigned long i;
 	int ret;
 	int ret;
 
 
-	page = alloc_pages(low_order_gfp_flags, order);
+	page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
 	if (!page)
 	if (!page)
 		return -ENOMEM;
 		return -ENOMEM;
 
 

+ 1 - 0
drivers/staging/android/uapi/ashmem.h

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR Apache-2.0)
 /*
 /*
  * drivers/staging/android/uapi/ashmem.h
  * drivers/staging/android/uapi/ashmem.h
  *
  *

+ 1 - 10
drivers/staging/android/uapi/ion.h

@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 /*
  * drivers/staging/android/uapi/ion.h
  * drivers/staging/android/uapi/ion.h
  *
  *
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
  */
 
 
 #ifndef _UAPI_LINUX_ION_H
 #ifndef _UAPI_LINUX_ION_H

+ 0 - 27
drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt

@@ -1,27 +0,0 @@
-Arm TrustZone CryptoCell cryptographic accelerators
-
-Required properties:
-- compatible: must be "arm,cryptocell-712-ree".
-- reg: shall contain base register location and length.
-	Typically length is 0x10000.
-- interrupts: shall contain the interrupt for the device.
-
-Optional properties:
-- interrupt-parent: can designate the interrupt controller the
-	device interrupt is connected to, if needed.
-- clocks: may contain the clock handling the device, if needed.
-- power-domains: may contain a reference to the PM domain, if applicable.
-
-
-Examples:
-
-Zynq FPGA device
-----------------
-
-       arm_cc7x: arm_cc7x@80000000 {
-               compatible = "arm,cryptocell-712-ree";
-               interrupt-parent = <&intc>;
-               interrupts = < 0 30 4 >;
-               reg = < 0x80000000 0x10000 >;
-       };
-

+ 2 - 0
drivers/staging/ccree/Kconfig

@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
 config CRYPTO_DEV_CCREE
 config CRYPTO_DEV_CCREE
 	tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
 	tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
 	depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
 	depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA

+ 6 - 2
drivers/staging/ccree/Makefile

@@ -1,3 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o
-ccree-$(CONFIG_CRYPTO_FIPS) += ssi_fips.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
+ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
+ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
+ccree-$(CONFIG_PM) += cc_pm.o

+ 1 - 21
drivers/staging/ccree/TODO

@@ -6,25 +6,5 @@
 *									*
 *									*
 *************************************************************************
 *************************************************************************
 
 
-ccree specific items
-a.k.a stuff fixing for this driver to move out of staging
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1. ???
 
 
-1.  Move to using Crypto Engine to handle backlog queueing.
-2.  Remove synchronous algorithm support leftovers.
-3.  Separate platform specific code for FIPS and power management into separate platform modules.
-4.  Drop legacy kernel support code.
-5.  Move most (all?) #ifdef CONFIG into inline functions.
-6.  Remove all unused definitions.
-7.  Re-factor to accomediate newer/older HW revisions besides the 712.
-8.  Handle the many checkpatch errors.
-9.  Implement ahash import/export correctly.
-10. Go through a proper review of DT bindings and sysfs ABI
-11. Sort out FIPS mode: bake tests into testmgr, sort out behaviour on error, 
-    figure if 3DES weak key check is needed
-
-Kernel infrastructure items
-a.k.a stuff we either neither need to fix in the kernel or understand what we're doing wrong
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1. ahash import/export context has a PAGE_SIZE/8 size limit.  We need more.
-2. Crypto Engine seems to be built for HW with hardware queue depth of 1, we have 600++.

+ 513 - 606
drivers/staging/ccree/ssi_aead.c → drivers/staging/ccree/cc_aead.c

@@ -1,41 +1,19 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
 #include <crypto/algapi.h>
 #include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/hash.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/aead.h>
-#include <crypto/sha.h>
-#include <crypto/ctr.h>
 #include <crypto/authenc.h>
 #include <crypto/authenc.h>
-#include <crypto/aes.h>
 #include <crypto/des.h>
 #include <crypto/des.h>
 #include <linux/rtnetlink.h>
 #include <linux/rtnetlink.h>
-#include <linux/version.h>
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_aead.h"
-#include "ssi_request_mgr.h"
-#include "ssi_hash.h"
-#include "ssi_sysfs.h"
-#include "ssi_sram_mgr.h"
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_aead.h"
+#include "cc_request_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
 
 
 #define template_aead	template_u.aead
 #define template_aead	template_u.aead
 
 
@@ -51,8 +29,8 @@
 /* Value of each ICV_CMP byte (of 8) in case of success */
 /* Value of each ICV_CMP byte (of 8) in case of success */
 #define ICV_VERIF_OK 0x01
 #define ICV_VERIF_OK 0x01
 
 
-struct ssi_aead_handle {
-	ssi_sram_addr_t sram_workspace_addr;
+struct cc_aead_handle {
+	cc_sram_addr_t sram_workspace_addr;
 	struct list_head aead_list;
 	struct list_head aead_list;
 };
 };
 
 
@@ -68,8 +46,8 @@ struct cc_xcbc_s {
 	dma_addr_t xcbc_keys_dma_addr;
 	dma_addr_t xcbc_keys_dma_addr;
 };
 };
 
 
-struct ssi_aead_ctx {
-	struct ssi_drvdata *drvdata;
+struct cc_aead_ctx {
+	struct cc_drvdata *drvdata;
 	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
 	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
 	u8 *enckey;
 	u8 *enckey;
 	dma_addr_t enckey_dma_addr;
 	dma_addr_t enckey_dma_addr;
@@ -90,9 +68,9 @@ static inline bool valid_assoclen(struct aead_request *req)
 	return ((req->assoclen == 16) || (req->assoclen == 20));
 	return ((req->assoclen == 16) || (req->assoclen == 20));
 }
 }
 
 
-static void ssi_aead_exit(struct crypto_aead *tfm)
+static void cc_aead_exit(struct crypto_aead *tfm)
 {
 {
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
 	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
@@ -100,7 +78,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
 
 
 	/* Unmap enckey buffer */
 	/* Unmap enckey buffer */
 	if (ctx->enckey) {
 	if (ctx->enckey) {
-		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
+		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
+				  ctx->enckey_dma_addr);
 		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
 		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
 			&ctx->enckey_dma_addr);
 			&ctx->enckey_dma_addr);
 		ctx->enckey_dma_addr = 0;
 		ctx->enckey_dma_addr = 0;
@@ -143,22 +122,22 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
 	}
 	}
 }
 }
 
 
-static int ssi_aead_init(struct crypto_aead *tfm)
+static int cc_aead_init(struct crypto_aead *tfm)
 {
 {
 	struct aead_alg *alg = crypto_aead_alg(tfm);
 	struct aead_alg *alg = crypto_aead_alg(tfm);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct ssi_crypto_alg *ssi_alg =
-			container_of(alg, struct ssi_crypto_alg, aead_alg);
-	struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_crypto_alg *cc_alg =
+			container_of(alg, struct cc_crypto_alg, aead_alg);
+	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
 
 
 	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
 	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
 		crypto_tfm_alg_name(&tfm->base));
 		crypto_tfm_alg_name(&tfm->base));
 
 
 	/* Initialize modes in instance */
 	/* Initialize modes in instance */
-	ctx->cipher_mode = ssi_alg->cipher_mode;
-	ctx->flow_mode = ssi_alg->flow_mode;
-	ctx->auth_mode = ssi_alg->auth_mode;
-	ctx->drvdata = ssi_alg->drvdata;
+	ctx->cipher_mode = cc_alg->cipher_mode;
+	ctx->flow_mode = cc_alg->flow_mode;
+	ctx->auth_mode = cc_alg->auth_mode;
+	ctx->drvdata = cc_alg->drvdata;
 	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
 	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
 
 
 	/* Allocate key buffer, cache line aligned */
 	/* Allocate key buffer, cache line aligned */
@@ -221,23 +200,25 @@ static int ssi_aead_init(struct crypto_aead *tfm)
 	return 0;
 	return 0;
 
 
 init_failed:
 init_failed:
-	ssi_aead_exit(tfm);
+	cc_aead_exit(tfm);
 	return -ENOMEM;
 	return -ENOMEM;
 }
 }
 
 
-static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 {
 {
-	struct aead_request *areq = (struct aead_request *)ssi_req;
+	struct aead_request *areq = (struct aead_request *)cc_req;
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	int err = 0;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
 
-	ssi_buffer_mgr_unmap_aead_request(dev, areq);
+	cc_unmap_aead_request(dev, areq);
 
 
 	/* Restore ordinary iv pointer */
 	/* Restore ordinary iv pointer */
 	areq->iv = areq_ctx->backup_iv;
 	areq->iv = areq_ctx->backup_iv;
 
 
+	if (err)
+		goto done;
+
 	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
 	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
 		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
 		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
 			   ctx->authsize) != 0) {
 			   ctx->authsize) != 0) {
@@ -246,36 +227,43 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
 			/* In case of payload authentication failure, MUST NOT
 			/* In case of payload authentication failure, MUST NOT
 			 * revealed the decrypted message --> zero its memory.
 			 * revealed the decrypted message --> zero its memory.
 			 */
 			 */
-			ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
+			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
 			err = -EBADMSG;
 			err = -EBADMSG;
 		}
 		}
 	} else { /*ENCRYPT*/
 	} else { /*ENCRYPT*/
-		if (unlikely(areq_ctx->is_icv_fragmented))
-			ssi_buffer_mgr_copy_scatterlist_portion(
-				dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
-				areq->cryptlen + areq_ctx->dst_offset,
-				(areq->cryptlen + areq_ctx->dst_offset +
-				 ctx->authsize),
-				SSI_SG_FROM_BUF);
-
-		/* If an IV was generated, copy it back to the user provided buffer. */
+		if (areq_ctx->is_icv_fragmented) {
+			u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+
+			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
+					   areq_ctx->dst_sgl, skip,
+					   (skip + ctx->authsize),
+					   CC_SG_FROM_BUF);
+		}
+
+		/* If an IV was generated, copy it back to the user provided
+		 * buffer.
+		 */
 		if (areq_ctx->backup_giv) {
 		if (areq_ctx->backup_giv) {
 			if (ctx->cipher_mode == DRV_CIPHER_CTR)
 			if (ctx->cipher_mode == DRV_CIPHER_CTR)
-				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
+				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+				       CTR_RFC3686_NONCE_SIZE,
+				       CTR_RFC3686_IV_SIZE);
 			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
 			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
-				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
+				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
 		}
 		}
 	}
 	}
-
+done:
 	aead_request_complete(areq, err);
 	aead_request_complete(areq, err);
 }
 }
 
 
-static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
+static int xcbc_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
 {
 {
 	/* Load the AES key */
 	/* Load the AES key */
 	hw_desc_init(&desc[0]);
 	hw_desc_init(&desc[0]);
-	/* We are using for the source/user key the same buffer as for the output keys,
-	 * because after this key loading it is not needed anymore
+	/* We are using for the source/user key the same buffer
+	 * as for the output keys, * because after this key loading it
+	 * is not needed anymore
 	 */
 	 */
 	set_din_type(&desc[0], DMA_DLLI,
 	set_din_type(&desc[0], DMA_DLLI,
 		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
 		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
@@ -309,7 +297,7 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
 	return 4;
 	return 4;
 }
 }
 
 
-static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
+static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
 {
 {
 	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 	unsigned int digest_ofs = 0;
 	unsigned int digest_ofs = 0;
@@ -328,8 +316,8 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
 		hw_desc_init(&desc[idx]);
 		hw_desc_init(&desc[idx]);
 		set_cipher_mode(&desc[idx], hash_mode);
 		set_cipher_mode(&desc[idx], hash_mode);
 		set_din_sram(&desc[idx],
 		set_din_sram(&desc[idx],
-			     ssi_ahash_get_larval_digest_sram_addr(
-				ctx->drvdata, ctx->auth_mode),
+			     cc_larval_digest_addr(ctx->drvdata,
+						   ctx->auth_mode),
 			     digest_size);
 			     digest_size);
 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
@@ -378,7 +366,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
 	return idx;
 	return idx;
 }
 }
 
 
-static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
+static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 {
 {
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
@@ -390,9 +378,9 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
 	case DRV_HASH_SHA256:
 	case DRV_HASH_SHA256:
 		break;
 		break;
 	case DRV_HASH_XCBC_MAC:
 	case DRV_HASH_XCBC_MAC:
-		if ((ctx->auth_keylen != AES_KEYSIZE_128) &&
-		    (ctx->auth_keylen != AES_KEYSIZE_192) &&
-		    (ctx->auth_keylen != AES_KEYSIZE_256))
+		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
+		    ctx->auth_keylen != AES_KEYSIZE_192 &&
+		    ctx->auth_keylen != AES_KEYSIZE_256)
 			return -ENOTSUPP;
 			return -ENOTSUPP;
 		break;
 		break;
 	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
 	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
@@ -404,16 +392,16 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 	/* Check cipher key size */
 	/* Check cipher key size */
-	if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
+	if (ctx->flow_mode == S_DIN_to_DES) {
 		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
 		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
 			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
 			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
 				ctx->enc_keylen);
 				ctx->enc_keylen);
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
 	} else { /* Default assumed to be AES ciphers */
 	} else { /* Default assumed to be AES ciphers */
-		if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
-		    (ctx->enc_keylen != AES_KEYSIZE_192) &&
-		    (ctx->enc_keylen != AES_KEYSIZE_256)) {
+		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
+		    ctx->enc_keylen != AES_KEYSIZE_192 &&
+		    ctx->enc_keylen != AES_KEYSIZE_256) {
 			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
 			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
 				ctx->enc_keylen);
 				ctx->enc_keylen);
 			return -EINVAL;
 			return -EINVAL;
@@ -427,14 +415,14 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
  * (copy to intenral buffer or hash in case of key longer than block
  * (copy to intenral buffer or hash in case of key longer than block
  */
  */
 static int
 static int
-ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+		      unsigned int keylen)
 {
 {
 	dma_addr_t key_dma_addr = 0;
 	dma_addr_t key_dma_addr = 0;
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
-					ctx->drvdata, ctx->auth_mode);
-	struct ssi_crypto_req ssi_req = {};
+	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+	struct cc_crypto_req cc_req = {};
 	unsigned int blocksize;
 	unsigned int blocksize;
 	unsigned int digestsize;
 	unsigned int digestsize;
 	unsigned int hashmode;
 	unsigned int hashmode;
@@ -457,9 +445,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
 		hashmode = DRV_HASH_HW_SHA256;
 		hashmode = DRV_HASH_HW_SHA256;
 	}
 	}
 
 
-	if (likely(keylen != 0)) {
-		key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
+	if (keylen != 0) {
+		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
+					      DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, key_dma_addr)) {
 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 				key, keylen);
 				key, keylen);
 			return -ENOMEM;
 			return -ENOMEM;
@@ -537,22 +526,22 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
 		idx++;
 		idx++;
 	}
 	}
 
 
-	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
-	if (unlikely(rc != 0))
+	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+	if (rc)
 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 
 
-	if (likely(key_dma_addr != 0))
+	if (key_dma_addr)
 		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 
 
 	return rc;
 	return rc;
 }
 }
 
 
 static int
 static int
-ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 {
 {
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct rtattr *rta = (struct rtattr *)key;
 	struct rtattr *rta = (struct rtattr *)key;
-	struct ssi_crypto_req ssi_req = {};
+	struct cc_crypto_req cc_req = {};
 	struct crypto_authenc_key_param *param;
 	struct crypto_authenc_key_param *param;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 	int seq_len = 0, rc = -EINVAL;
 	int seq_len = 0, rc = -EINVAL;
@@ -586,8 +575,9 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 			/* Copy nonce from last 4 bytes in CTR key to
 			/* Copy nonce from last 4 bytes in CTR key to
 			 *  first 4 bytes in CTR IV
 			 *  first 4 bytes in CTR IV
 			 */
 			 */
-			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
-				CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
+			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
+			       CTR_RFC3686_NONCE_SIZE);
 			/* Set CTR key size */
 			/* Set CTR key size */
 			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 		}
 		}
@@ -597,7 +587,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 	}
 	}
 
 
 	rc = validate_keys_sizes(ctx);
 	rc = validate_keys_sizes(ctx);
-	if (unlikely(rc != 0))
+	if (rc)
 		goto badkey;
 		goto badkey;
 
 
 	/* STAT_PHASE_1: Copy key to ctx */
 	/* STAT_PHASE_1: Copy key to ctx */
@@ -609,8 +599,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
 		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
-		rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
-		if (rc != 0)
+		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+		if (rc)
 			goto badkey;
 			goto badkey;
 	}
 	}
 
 
@@ -635,8 +625,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 	/* STAT_PHASE_3: Submit sequence to HW */
 	/* STAT_PHASE_3: Submit sequence to HW */
 
 
 	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
 	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
-		rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
-		if (unlikely(rc != 0)) {
+		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
+		if (rc) {
 			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 			goto setkey_error;
 			goto setkey_error;
 		}
 		}
@@ -652,10 +642,10 @@ setkey_error:
 	return rc;
 	return rc;
 }
 }
 
 
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+				 unsigned int keylen)
 {
 {
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
 
 	if (keylen < 3)
 	if (keylen < 3)
 		return -EINVAL;
 		return -EINVAL;
@@ -663,20 +653,18 @@ static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
 	keylen -= 3;
 	keylen -= 3;
 	memcpy(ctx->ctr_nonce, key + keylen, 3);
 	memcpy(ctx->ctr_nonce, key + keylen, 3);
 
 
-	return ssi_aead_setkey(tfm, key, keylen);
+	return cc_aead_setkey(tfm, key, keylen);
 }
 }
-#endif /*SSI_CC_HAS_AES_CCM*/
 
 
-static int ssi_aead_setauthsize(
-	struct crypto_aead *authenc,
-	unsigned int authsize)
+static int cc_aead_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
 {
 {
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	/* Unsupported auth. sizes */
 	/* Unsupported auth. sizes */
-	if ((authsize == 0) ||
-	    (authsize > crypto_aead_maxauthsize(authenc))) {
+	if (authsize == 0 ||
+	    authsize > crypto_aead_maxauthsize(authenc)) {
 		return -ENOTSUPP;
 		return -ENOTSUPP;
 	}
 	}
 
 
@@ -686,9 +674,8 @@ static int ssi_aead_setauthsize(
 	return 0;
 	return 0;
 }
 }
 
 
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
-				       unsigned int authsize)
+static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+				      unsigned int authsize)
 {
 {
 	switch (authsize) {
 	switch (authsize) {
 	case 8:
 	case 8:
@@ -699,11 +686,11 @@ static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	return ssi_aead_setauthsize(authenc, authsize);
+	return cc_aead_setauthsize(authenc, authsize);
 }
 }
 
 
-static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
-			       unsigned int authsize)
+static int cc_ccm_setauthsize(struct crypto_aead *authenc,
+			      unsigned int authsize)
 {
 {
 	switch (authsize) {
 	switch (authsize) {
 	case 4:
 	case 4:
@@ -718,46 +705,41 @@ static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	return ssi_aead_setauthsize(authenc, authsize);
+	return cc_aead_setauthsize(authenc, authsize);
 }
 }
-#endif /*SSI_CC_HAS_AES_CCM*/
-
-static inline void
-ssi_aead_create_assoc_desc(
-	struct aead_request *areq,
-	unsigned int flow_mode,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+
+static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+			      struct cc_hw_desc desc[], unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
-	enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	switch (assoc_dma_type) {
 	switch (assoc_dma_type) {
-	case SSI_DMA_BUF_DLLI:
+	case CC_DMA_BUF_DLLI:
 		dev_dbg(dev, "ASSOC buffer type DLLI\n");
 		dev_dbg(dev, "ASSOC buffer type DLLI\n");
 		hw_desc_init(&desc[idx]);
 		hw_desc_init(&desc[idx]);
 		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
 		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
-			     areq->assoclen, NS_BIT); set_flow_mode(&desc[idx],
-			     flow_mode);
-		if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
-		    (areq_ctx->cryptlen > 0))
+			     areq->assoclen, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+		    areq_ctx->cryptlen > 0)
 			set_din_not_last_indication(&desc[idx]);
 			set_din_not_last_indication(&desc[idx]);
 		break;
 		break;
-	case SSI_DMA_BUF_MLLI:
+	case CC_DMA_BUF_MLLI:
 		dev_dbg(dev, "ASSOC buffer type MLLI\n");
 		dev_dbg(dev, "ASSOC buffer type MLLI\n");
 		hw_desc_init(&desc[idx]);
 		hw_desc_init(&desc[idx]);
 		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
 		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
 			     areq_ctx->assoc.mlli_nents, NS_BIT);
 			     areq_ctx->assoc.mlli_nents, NS_BIT);
 		set_flow_mode(&desc[idx], flow_mode);
 		set_flow_mode(&desc[idx], flow_mode);
-		if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
-		    (areq_ctx->cryptlen > 0))
+		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+		    areq_ctx->cryptlen > 0)
 			set_din_not_last_indication(&desc[idx]);
 			set_din_not_last_indication(&desc[idx]);
 		break;
 		break;
-	case SSI_DMA_BUF_NULL:
+	case CC_DMA_BUF_NULL:
 	default:
 	default:
 		dev_err(dev, "Invalid ASSOC buffer type\n");
 		dev_err(dev, "Invalid ASSOC buffer type\n");
 	}
 	}
@@ -765,23 +747,20 @@ ssi_aead_create_assoc_desc(
 	*seq_size = (++idx);
 	*seq_size = (++idx);
 }
 }
 
 
-static inline void
-ssi_aead_process_authenc_data_desc(
-	struct aead_request *areq,
-	unsigned int flow_mode,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size,
-	int direct)
+static void cc_proc_authen_desc(struct aead_request *areq,
+				unsigned int flow_mode,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size, int direct)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
-	enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	switch (data_dma_type) {
 	switch (data_dma_type) {
-	case SSI_DMA_BUF_DLLI:
+	case CC_DMA_BUF_DLLI:
 	{
 	{
 		struct scatterlist *cipher =
 		struct scatterlist *cipher =
 			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
@@ -798,16 +777,16 @@ ssi_aead_process_authenc_data_desc(
 		set_flow_mode(&desc[idx], flow_mode);
 		set_flow_mode(&desc[idx], flow_mode);
 		break;
 		break;
 	}
 	}
-	case SSI_DMA_BUF_MLLI:
+	case CC_DMA_BUF_MLLI:
 	{
 	{
 		/* DOUBLE-PASS flow (as default)
 		/* DOUBLE-PASS flow (as default)
 		 * assoc. + iv + data -compact in one table
 		 * assoc. + iv + data -compact in one table
 		 * if assoclen is ZERO only IV perform
 		 * if assoclen is ZERO only IV perform
 		 */
 		 */
-		ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
 		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 
 
-		if (likely(areq_ctx->is_single_pass)) {
+		if (areq_ctx->is_single_pass) {
 			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 				mlli_addr = areq_ctx->dst.sram_addr;
 				mlli_addr = areq_ctx->dst.sram_addr;
 				mlli_nents = areq_ctx->dst.mlli_nents;
 				mlli_nents = areq_ctx->dst.mlli_nents;
@@ -824,7 +803,7 @@ ssi_aead_process_authenc_data_desc(
 		set_flow_mode(&desc[idx], flow_mode);
 		set_flow_mode(&desc[idx], flow_mode);
 		break;
 		break;
 	}
 	}
-	case SSI_DMA_BUF_NULL:
+	case CC_DMA_BUF_NULL:
 	default:
 	default:
 		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
 		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
 	}
 	}
@@ -832,37 +811,36 @@ ssi_aead_process_authenc_data_desc(
 	*seq_size = (++idx);
 	*seq_size = (++idx);
 }
 }
 
 
-static inline void
-ssi_aead_process_cipher_data_desc(
-	struct aead_request *areq,
-	unsigned int flow_mode,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_proc_cipher_desc(struct aead_request *areq,
+				unsigned int flow_mode,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size)
 {
 {
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
-	enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	if (areq_ctx->cryptlen == 0)
 	if (areq_ctx->cryptlen == 0)
 		return; /*null processing*/
 		return; /*null processing*/
 
 
 	switch (data_dma_type) {
 	switch (data_dma_type) {
-	case SSI_DMA_BUF_DLLI:
+	case CC_DMA_BUF_DLLI:
 		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
 		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
 		hw_desc_init(&desc[idx]);
 		hw_desc_init(&desc[idx]);
 		set_din_type(&desc[idx], DMA_DLLI,
 		set_din_type(&desc[idx], DMA_DLLI,
 			     (sg_dma_address(areq_ctx->src_sgl) +
 			     (sg_dma_address(areq_ctx->src_sgl) +
-			      areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
+			      areq_ctx->src_offset), areq_ctx->cryptlen,
+			      NS_BIT);
 		set_dout_dlli(&desc[idx],
 		set_dout_dlli(&desc[idx],
 			      (sg_dma_address(areq_ctx->dst_sgl) +
 			      (sg_dma_address(areq_ctx->dst_sgl) +
 			       areq_ctx->dst_offset),
 			       areq_ctx->dst_offset),
 			      areq_ctx->cryptlen, NS_BIT, 0);
 			      areq_ctx->cryptlen, NS_BIT, 0);
 		set_flow_mode(&desc[idx], flow_mode);
 		set_flow_mode(&desc[idx], flow_mode);
 		break;
 		break;
-	case SSI_DMA_BUF_MLLI:
+	case CC_DMA_BUF_MLLI:
 		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
 		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
 		hw_desc_init(&desc[idx]);
 		hw_desc_init(&desc[idx]);
 		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
 		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
@@ -871,7 +849,7 @@ ssi_aead_process_cipher_data_desc(
 			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
 			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
 		set_flow_mode(&desc[idx], flow_mode);
 		set_flow_mode(&desc[idx], flow_mode);
 		break;
 		break;
-	case SSI_DMA_BUF_NULL:
+	case CC_DMA_BUF_NULL:
 	default:
 	default:
 		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
 		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
 	}
 	}
@@ -879,13 +857,12 @@ ssi_aead_process_cipher_data_desc(
 	*seq_size = (++idx);
 	*seq_size = (++idx);
 }
 }
 
 
-static inline void ssi_aead_process_digest_result_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_proc_digest_desc(struct aead_request *req,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -930,13 +907,12 @@ static inline void ssi_aead_process_digest_result_desc(
 	*seq_size = (++idx);
 	*seq_size = (++idx);
 }
 }
 
 
-static inline void ssi_aead_setup_cipher_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_set_cipher_desc(struct aead_request *req,
+			       struct cc_hw_desc desc[],
+			       unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = req_ctx->hw_iv_size;
 	unsigned int hw_iv_size = req_ctx->hw_iv_size;
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
@@ -976,11 +952,8 @@ static inline void ssi_aead_setup_cipher_desc(
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline void ssi_aead_process_cipher(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size,
-	unsigned int data_flow_mode)
+static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
+			   unsigned int *seq_size, unsigned int data_flow_mode)
 {
 {
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	int direct = req_ctx->gen_ctx.op_type;
 	int direct = req_ctx->gen_ctx.op_type;
@@ -989,8 +962,8 @@ static inline void ssi_aead_process_cipher(
 	if (req_ctx->cryptlen == 0)
 	if (req_ctx->cryptlen == 0)
 		return; /*null processing*/
 		return; /*null processing*/
 
 
-	ssi_aead_setup_cipher_desc(req, desc, &idx);
-	ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
+	cc_set_cipher_desc(req, desc, &idx);
+	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 		/* We must wait for DMA to write all cipher */
 		/* We must wait for DMA to write all cipher */
 		hw_desc_init(&desc[idx]);
 		hw_desc_init(&desc[idx]);
@@ -1002,13 +975,11 @@ static inline void ssi_aead_process_cipher(
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline void ssi_aead_hmac_setup_digest_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
+			     unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -1028,10 +999,8 @@ static inline void ssi_aead_hmac_setup_digest_desc(
 	/* Load init. digest len (64 bytes) */
 	/* Load init. digest len (64 bytes) */
 	hw_desc_init(&desc[idx]);
 	hw_desc_init(&desc[idx]);
 	set_cipher_mode(&desc[idx], hash_mode);
 	set_cipher_mode(&desc[idx], hash_mode);
-	set_din_sram(&desc[idx],
-		     ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
-								hash_mode),
-								HASH_LEN_SIZE);
+	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+		     HASH_LEN_SIZE);
 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 	idx++;
@@ -1039,13 +1008,11 @@ static inline void ssi_aead_hmac_setup_digest_desc(
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline void ssi_aead_xcbc_setup_digest_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
+			     unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 
 
 	/* Loading MAC state */
 	/* Loading MAC state */
@@ -1101,28 +1068,26 @@ static inline void ssi_aead_xcbc_setup_digest_desc(
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline void ssi_aead_process_digest_header_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_proc_header_desc(struct aead_request *req,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size)
 {
 {
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 	/* Hash associated data */
 	/* Hash associated data */
 	if (req->assoclen > 0)
 	if (req->assoclen > 0)
-		ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
 
 
 	/* Hash IV */
 	/* Hash IV */
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline void ssi_aead_process_digest_scheme_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_proc_scheme_desc(struct aead_request *req,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
 	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -1161,9 +1126,7 @@ static inline void ssi_aead_process_digest_scheme_desc(
 	/* Load init. digest len (64 bytes) */
 	/* Load init. digest len (64 bytes) */
 	hw_desc_init(&desc[idx]);
 	hw_desc_init(&desc[idx]);
 	set_cipher_mode(&desc[idx], hash_mode);
 	set_cipher_mode(&desc[idx], hash_mode);
-	set_din_sram(&desc[idx],
-		     ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
-								hash_mode),
+	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
 		     HASH_LEN_SIZE);
 		     HASH_LEN_SIZE);
 	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 	set_flow_mode(&desc[idx], S_DIN_to_HASH);
@@ -1180,20 +1143,17 @@ static inline void ssi_aead_process_digest_scheme_desc(
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline void ssi_aead_load_mlli_to_sram(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_mlli_to_sram(struct aead_request *req,
+			    struct cc_hw_desc desc[], unsigned int *seq_size)
 {
 {
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
-	if (unlikely(
-		(req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
-		(req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
-		!req_ctx->is_single_pass)) {
+	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
+	    !req_ctx->is_single_pass) {
 		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
 		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
 			(unsigned int)ctx->drvdata->mlli_sram_addr,
 			(unsigned int)ctx->drvdata->mlli_sram_addr,
 			req_ctx->mlli_params.mlli_len);
 			req_ctx->mlli_params.mlli_len);
@@ -1210,54 +1170,52 @@ static inline void ssi_aead_load_mlli_to_sram(
 	}
 	}
 }
 }
 
 
-static inline enum cc_flow_mode ssi_aead_get_data_flow_mode(
-	enum drv_crypto_direction direct,
-	enum cc_flow_mode setup_flow_mode,
-	bool is_single_pass)
+static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
+					  enum cc_flow_mode setup_flow_mode,
+					  bool is_single_pass)
 {
 {
 	enum cc_flow_mode data_flow_mode;
 	enum cc_flow_mode data_flow_mode;
 
 
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 		if (setup_flow_mode == S_DIN_to_AES)
 		if (setup_flow_mode == S_DIN_to_AES)
-			data_flow_mode = likely(is_single_pass) ?
+			data_flow_mode = is_single_pass ?
 				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
 				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
 		else
 		else
-			data_flow_mode = likely(is_single_pass) ?
+			data_flow_mode = is_single_pass ?
 				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
 				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
 	} else { /* Decrypt */
 	} else { /* Decrypt */
 		if (setup_flow_mode == S_DIN_to_AES)
 		if (setup_flow_mode == S_DIN_to_AES)
-			data_flow_mode = likely(is_single_pass) ?
-					AES_and_HASH : DIN_AES_DOUT;
+			data_flow_mode = is_single_pass ?
+				AES_and_HASH : DIN_AES_DOUT;
 		else
 		else
-			data_flow_mode = likely(is_single_pass) ?
-					DES_and_HASH : DIN_DES_DOUT;
+			data_flow_mode = is_single_pass ?
+				DES_and_HASH : DIN_DES_DOUT;
 	}
 	}
 
 
 	return data_flow_mode;
 	return data_flow_mode;
 }
 }
 
 
-static inline void ssi_aead_hmac_authenc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+			    unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	int direct = req_ctx->gen_ctx.op_type;
 	int direct = req_ctx->gen_ctx.op_type;
-	unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
-		direct, ctx->flow_mode, req_ctx->is_single_pass);
+	unsigned int data_flow_mode =
+		cc_get_data_flow(direct, ctx->flow_mode,
+				 req_ctx->is_single_pass);
 
 
 	if (req_ctx->is_single_pass) {
 	if (req_ctx->is_single_pass) {
 		/**
 		/**
 		 * Single-pass flow
 		 * Single-pass flow
 		 */
 		 */
-		ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
-		ssi_aead_setup_cipher_desc(req, desc, seq_size);
-		ssi_aead_process_digest_header_desc(req, desc, seq_size);
-		ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
-		ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
-		ssi_aead_process_digest_result_desc(req, desc, seq_size);
+		cc_set_hmac_desc(req, desc, seq_size);
+		cc_set_cipher_desc(req, desc, seq_size);
+		cc_proc_header_desc(req, desc, seq_size);
+		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+		cc_proc_scheme_desc(req, desc, seq_size);
+		cc_proc_digest_desc(req, desc, seq_size);
 		return;
 		return;
 	}
 	}
 
 
@@ -1268,49 +1226,48 @@ static inline void ssi_aead_hmac_authenc(
 	 */
 	 */
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 		/* encrypt first.. */
 		/* encrypt first.. */
-		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
 		/* authenc after..*/
 		/* authenc after..*/
-		ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
-		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
-		ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
-		ssi_aead_process_digest_result_desc(req, desc, seq_size);
+		cc_set_hmac_desc(req, desc, seq_size);
+		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+		cc_proc_scheme_desc(req, desc, seq_size);
+		cc_proc_digest_desc(req, desc, seq_size);
 
 
 	} else { /*DECRYPT*/
 	} else { /*DECRYPT*/
 		/* authenc first..*/
 		/* authenc first..*/
-		ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
-		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
-		ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
+		cc_set_hmac_desc(req, desc, seq_size);
+		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+		cc_proc_scheme_desc(req, desc, seq_size);
 		/* decrypt after.. */
 		/* decrypt after.. */
-		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
 		/* read the digest result with setting the completion bit
 		/* read the digest result with setting the completion bit
 		 * must be after the cipher operation
 		 * must be after the cipher operation
 		 */
 		 */
-		ssi_aead_process_digest_result_desc(req, desc, seq_size);
+		cc_proc_digest_desc(req, desc, seq_size);
 	}
 	}
 }
 }
 
 
-static inline void
-ssi_aead_xcbc_authenc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void
+cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+		unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	int direct = req_ctx->gen_ctx.op_type;
 	int direct = req_ctx->gen_ctx.op_type;
-	unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
-		direct, ctx->flow_mode, req_ctx->is_single_pass);
+	unsigned int data_flow_mode =
+		cc_get_data_flow(direct, ctx->flow_mode,
+				 req_ctx->is_single_pass);
 
 
 	if (req_ctx->is_single_pass) {
 	if (req_ctx->is_single_pass) {
 		/**
 		/**
 		 * Single-pass flow
 		 * Single-pass flow
 		 */
 		 */
-		ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
-		ssi_aead_setup_cipher_desc(req, desc, seq_size);
-		ssi_aead_process_digest_header_desc(req, desc, seq_size);
-		ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
-		ssi_aead_process_digest_result_desc(req, desc, seq_size);
+		cc_set_xcbc_desc(req, desc, seq_size);
+		cc_set_cipher_desc(req, desc, seq_size);
+		cc_proc_header_desc(req, desc, seq_size);
+		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+		cc_proc_digest_desc(req, desc, seq_size);
 		return;
 		return;
 	}
 	}
 
 
@@ -1321,25 +1278,25 @@ ssi_aead_xcbc_authenc(
 	 */
 	 */
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 		/* encrypt first.. */
 		/* encrypt first.. */
-		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
 		/* authenc after.. */
 		/* authenc after.. */
-		ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
-		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
-		ssi_aead_process_digest_result_desc(req, desc, seq_size);
+		cc_set_xcbc_desc(req, desc, seq_size);
+		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+		cc_proc_digest_desc(req, desc, seq_size);
 	} else { /*DECRYPT*/
 	} else { /*DECRYPT*/
 		/* authenc first.. */
 		/* authenc first.. */
-		ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
-		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
+		cc_set_xcbc_desc(req, desc, seq_size);
+		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
 		/* decrypt after..*/
 		/* decrypt after..*/
-		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
 		/* read the digest result with setting the completion bit
 		/* read the digest result with setting the completion bit
 		 * must be after the cipher operation
 		 * must be after the cipher operation
 		 */
 		 */
-		ssi_aead_process_digest_result_desc(req, desc, seq_size);
+		cc_proc_digest_desc(req, desc, seq_size);
 	}
 	}
 }
 }
 
 
-static int validate_data_size(struct ssi_aead_ctx *ctx,
+static int validate_data_size(struct cc_aead_ctx *ctx,
 			      enum drv_crypto_direction direct,
 			      enum drv_crypto_direction direct,
 			      struct aead_request *req)
 			      struct aead_request *req)
 {
 {
@@ -1349,16 +1306,16 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
 	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
 	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
 			(req->cryptlen - ctx->authsize) : req->cryptlen;
 			(req->cryptlen - ctx->authsize) : req->cryptlen;
 
 
-	if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
-		     (req->cryptlen < ctx->authsize)))
+	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+	    req->cryptlen < ctx->authsize)
 		goto data_size_err;
 		goto data_size_err;
 
 
 	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
 	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
 
 
 	switch (ctx->flow_mode) {
 	switch (ctx->flow_mode) {
 	case S_DIN_to_AES:
 	case S_DIN_to_AES:
-		if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
-			     !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
 			goto data_size_err;
 			goto data_size_err;
 		if (ctx->cipher_mode == DRV_CIPHER_CCM)
 		if (ctx->cipher_mode == DRV_CIPHER_CCM)
 			break;
 			break;
@@ -1371,15 +1328,15 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
 		if (!IS_ALIGNED(assoclen, sizeof(u32)))
 		if (!IS_ALIGNED(assoclen, sizeof(u32)))
 			areq_ctx->is_single_pass = false;
 			areq_ctx->is_single_pass = false;
 
 
-		if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
+		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
 		    !IS_ALIGNED(cipherlen, sizeof(u32)))
 		    !IS_ALIGNED(cipherlen, sizeof(u32)))
 			areq_ctx->is_single_pass = false;
 			areq_ctx->is_single_pass = false;
 
 
 		break;
 		break;
 	case S_DIN_to_DES:
 	case S_DIN_to_DES:
-		if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
+		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
 			goto data_size_err;
 			goto data_size_err;
-		if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
+		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
 			areq_ctx->is_single_pass = false;
 			areq_ctx->is_single_pass = false;
 		break;
 		break;
 	default:
 	default:
@@ -1393,7 +1350,6 @@ data_size_err:
 	return -EINVAL;
 	return -EINVAL;
 }
 }
 
 
-#if SSI_CC_HAS_AES_CCM
 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
 {
 {
 	unsigned int len = 0;
 	unsigned int len = 0;
@@ -1438,13 +1394,11 @@ static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
 	return 0;
 	return 0;
 }
 }
 
 
-static inline int ssi_aead_ccm(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+		  unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 	unsigned int cipher_flow_mode;
 	unsigned int cipher_flow_mode;
@@ -1508,7 +1462,7 @@ static inline int ssi_aead_ccm(
 
 
 	/* process assoc data */
 	/* process assoc data */
 	if (req->assoclen > 0) {
 	if (req->assoclen > 0) {
-		ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
 	} else {
 	} else {
 		hw_desc_init(&desc[idx]);
 		hw_desc_init(&desc[idx]);
 		set_din_type(&desc[idx], DMA_DLLI,
 		set_din_type(&desc[idx], DMA_DLLI,
@@ -1519,8 +1473,8 @@ static inline int ssi_aead_ccm(
 	}
 	}
 
 
 	/* process the cipher */
 	/* process the cipher */
-	if (req_ctx->cryptlen != 0)
-		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
+	if (req_ctx->cryptlen)
+		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
 
 
 	/* Read temporal MAC */
 	/* Read temporal MAC */
 	hw_desc_init(&desc[idx]);
 	hw_desc_init(&desc[idx]);
@@ -1565,12 +1519,14 @@ static inline int ssi_aead_ccm(
 static int config_ccm_adata(struct aead_request *req)
 static int config_ccm_adata(struct aead_request *req)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	//unsigned int size_of_a = 0, rem_a_size = 0;
 	//unsigned int size_of_a = 0, rem_a_size = 0;
 	unsigned int lp = req->iv[0];
 	unsigned int lp = req->iv[0];
-	/* Note: The code assume that req->iv[0] already contains the value of L' of RFC3610 */
+	/* Note: The code assume that req->iv[0] already contains the value
+	 * of L' of RFC3610
+	 */
 	unsigned int l = lp + 1;  /* This is L' of RFC 3610. */
 	unsigned int l = lp + 1;  /* This is L' of RFC 3610. */
 	unsigned int m = ctx->authsize;  /* This is M' of RFC 3610. */
 	unsigned int m = ctx->authsize;  /* This is M' of RFC 3610. */
 	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
 	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
@@ -1601,7 +1557,7 @@ static int config_ccm_adata(struct aead_request *req)
 		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
 		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
 
 
 	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write L'. */
 	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write L'. */
-	if (rc != 0) {
+	if (rc) {
 		dev_err(dev, "message len overflow detected");
 		dev_err(dev, "message len overflow detected");
 		return rc;
 		return rc;
 	}
 	}
@@ -1619,33 +1575,35 @@ static int config_ccm_adata(struct aead_request *req)
 	return 0;
 	return 0;
 }
 }
 
 
-static void ssi_rfc4309_ccm_process(struct aead_request *req)
+static void cc_proc_rfc4309_ccm(struct aead_request *req)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 
 
 	/* L' */
 	/* L' */
 	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
 	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
-	areq_ctx->ctr_iv[0] = 3;  /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
+	/* For RFC 4309, always use 4 bytes for message length
+	 * (at most 2^32-1 bytes).
+	 */
+	areq_ctx->ctr_iv[0] = 3;
 
 
-	/* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
-	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
-	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET,    req->iv,        CCM_BLOCK_IV_SIZE);
+	/* In RFC 4309 there is an 11-bytes nonce+IV part,
+	 * that we build here.
+	 */
+	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
+	       CCM_BLOCK_NONCE_SIZE);
+	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
+	       CCM_BLOCK_IV_SIZE);
 	req->iv = areq_ctx->ctr_iv;
 	req->iv = areq_ctx->ctr_iv;
 	req->assoclen -= CCM_BLOCK_IV_SIZE;
 	req->assoclen -= CCM_BLOCK_IV_SIZE;
 }
 }
-#endif /*SSI_CC_HAS_AES_CCM*/
-
-#if SSI_CC_HAS_AES_GCM
 
 
-static inline void ssi_aead_gcm_setup_ghash_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_set_ghash_desc(struct aead_request *req,
+			      struct cc_hw_desc desc[], unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 
 
@@ -1703,7 +1661,9 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 	idx++;
 
 
-	/* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
+	/* Load GHASH initial STATE (which is 0). (for any hash there is an
+	 * initial state)
+	 */
 	hw_desc_init(&desc[idx]);
 	hw_desc_init(&desc[idx]);
 	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
 	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
 	set_dout_no_dma(&desc[idx], 0, 0, 1);
 	set_dout_no_dma(&desc[idx], 0, 0, 1);
@@ -1717,13 +1677,11 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline void ssi_aead_gcm_setup_gctr_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
+			     unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
 
 
@@ -1738,7 +1696,7 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
 	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 	idx++;
 
 
-	if ((req_ctx->cryptlen != 0) && (!req_ctx->plaintext_authenticate_only)) {
+	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
 		/* load AES/CTR initial CTR value inc by 2*/
 		/* load AES/CTR initial CTR value inc by 2*/
 		hw_desc_init(&desc[idx]);
 		hw_desc_init(&desc[idx]);
 		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
 		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
@@ -1755,13 +1713,12 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline void ssi_aead_process_gcm_result_desc(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_proc_gcm_result(struct aead_request *req,
+			       struct cc_hw_desc desc[],
+			       unsigned int *seq_size)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	dma_addr_t mac_result;
 	dma_addr_t mac_result;
 	unsigned int idx = *seq_size;
 	unsigned int idx = *seq_size;
@@ -1821,10 +1778,8 @@ static inline void ssi_aead_process_gcm_result_desc(
 	*seq_size = idx;
 	*seq_size = idx;
 }
 }
 
 
-static inline int ssi_aead_gcm(
-	struct aead_request *req,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+		  unsigned int *seq_size)
 {
 {
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	unsigned int cipher_flow_mode;
 	unsigned int cipher_flow_mode;
@@ -1837,77 +1792,33 @@ static inline int ssi_aead_gcm(
 
 
 	//in RFC4543 no data to encrypt. just copy data from src to dest.
 	//in RFC4543 no data to encrypt. just copy data from src to dest.
 	if (req_ctx->plaintext_authenticate_only) {
 	if (req_ctx->plaintext_authenticate_only) {
-		ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
-		ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
+		cc_set_ghash_desc(req, desc, seq_size);
 		/* process(ghash) assoc data */
 		/* process(ghash) assoc data */
-		ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
-		ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
-		ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+		cc_set_gctr_desc(req, desc, seq_size);
+		cc_proc_gcm_result(req, desc, seq_size);
 		return 0;
 		return 0;
 	}
 	}
 
 
 	// for gcm and rfc4106.
 	// for gcm and rfc4106.
-	ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+	cc_set_ghash_desc(req, desc, seq_size);
 	/* process(ghash) assoc data */
 	/* process(ghash) assoc data */
 	if (req->assoclen > 0)
 	if (req->assoclen > 0)
-		ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
-	ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
+		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+	cc_set_gctr_desc(req, desc, seq_size);
 	/* process(gctr+ghash) */
 	/* process(gctr+ghash) */
-	if (req_ctx->cryptlen != 0)
-		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
-	ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+	if (req_ctx->cryptlen)
+		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
+	cc_proc_gcm_result(req, desc, seq_size);
 
 
 	return 0;
 	return 0;
 }
 }
 
 
-#ifdef CC_DEBUG
-static inline void ssi_aead_dump_gcm(
-	const char *title,
-	struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-
-	if (ctx->cipher_mode != DRV_CIPHER_GCTR)
-		return;
-
-	if (title) {
-		dev_dbg(dev, "----------------------------------------------------------------------------------");
-		dev_dbg(dev, "%s\n", title);
-	}
-
-	dev_dbg(dev, "cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
-		ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
-		req->assoclen, req_ctx->cryptlen);
-
-	if (ctx->enckey)
-		dump_byte_array("mac key", ctx->enckey, 16);
-
-	dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);
-
-	dump_byte_array("gcm_iv_inc1", req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
-
-	dump_byte_array("gcm_iv_inc2", req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
-
-	dump_byte_array("hkey", req_ctx->hkey, AES_BLOCK_SIZE);
-
-	dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
-
-	dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);
-
-	if (req->src && req->cryptlen)
-		dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
-
-	if (req->dst)
-		dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
-}
-#endif
-
 static int config_gcm_context(struct aead_request *req)
 static int config_gcm_context(struct aead_request *req)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
@@ -1938,10 +1849,14 @@ static int config_gcm_context(struct aead_request *req)
 		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
 		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
 		temp64 = cpu_to_be64(cryptlen * 8);
 		temp64 = cpu_to_be64(cryptlen * 8);
 		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
 		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
-	} else { //rfc4543=>  all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
+	} else {
+		/* rfc4543=>  all data(AAD,IV,Plain) are considered additional
+		 * data that is nothing is encrypted.
+		 */
 		__be64 temp64;
 		__be64 temp64;
 
 
-		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
+				      cryptlen) * 8);
 		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
 		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
 		temp64 = 0;
 		temp64 = 0;
 		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
 		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1950,30 +1865,31 @@ static int config_gcm_context(struct aead_request *req)
 	return 0;
 	return 0;
 }
 }
 
 
-static void ssi_rfc4_gcm_process(struct aead_request *req)
+static void cc_proc_rfc4_gcm(struct aead_request *req)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 
 
-	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
-	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET,    req->iv, GCM_BLOCK_RFC4_IV_SIZE);
+	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
+	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
+	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
+	       GCM_BLOCK_RFC4_IV_SIZE);
 	req->iv = areq_ctx->ctr_iv;
 	req->iv = areq_ctx->ctr_iv;
 	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
 	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
 }
 }
 
 
-#endif /*SSI_CC_HAS_AES_GCM*/
-
-static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
+static int cc_proc_aead(struct aead_request *req,
+			enum drv_crypto_direction direct)
 {
 {
 	int rc = 0;
 	int rc = 0;
 	int seq_len = 0;
 	int seq_len = 0;
 	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
 	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	struct ssi_crypto_req ssi_req = {};
+	struct cc_crypto_req cc_req = {};
 
 
 	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
 	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
 		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
 		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
@@ -1983,7 +1899,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 	/* STAT_PHASE_0: Init and sanity checks */
 	/* STAT_PHASE_0: Init and sanity checks */
 
 
 	/* Check data length according to mode */
 	/* Check data length according to mode */
-	if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
+	if (validate_data_size(ctx, direct, req)) {
 		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
 		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
 			req->cryptlen, req->assoclen);
 			req->cryptlen, req->assoclen);
 		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
 		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
@@ -1991,8 +1907,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 	}
 	}
 
 
 	/* Setup DX request structure */
 	/* Setup DX request structure */
-	ssi_req.user_cb = (void *)ssi_aead_complete;
-	ssi_req.user_arg = (void *)req;
+	cc_req.user_cb = (void *)cc_aead_complete;
+	cc_req.user_arg = (void *)req;
 
 
 	/* Setup request context */
 	/* Setup request context */
 	areq_ctx->gen_ctx.op_type = direct;
 	areq_ctx->gen_ctx.op_type = direct;
@@ -2005,7 +1921,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 		/* Build CTR IV - Copy nonce from last 4 bytes in
 		/* Build CTR IV - Copy nonce from last 4 bytes in
 		 * CTR key to first 4 bytes in CTR IV
 		 * CTR key to first 4 bytes in CTR IV
 		 */
 		 */
-		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
+		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
+		       CTR_RFC3686_NONCE_SIZE);
 		if (!areq_ctx->backup_giv) /*User none-generated IV*/
 		if (!areq_ctx->backup_giv) /*User none-generated IV*/
 			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
 			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
 			       req->iv, CTR_RFC3686_IV_SIZE);
 			       req->iv, CTR_RFC3686_IV_SIZE);
@@ -2020,17 +1937,17 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
 		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
 		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
 		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
 		if (areq_ctx->ctr_iv != req->iv) {
 		if (areq_ctx->ctr_iv != req->iv) {
-			memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
+			memcpy(areq_ctx->ctr_iv, req->iv,
+			       crypto_aead_ivsize(tfm));
 			req->iv = areq_ctx->ctr_iv;
 			req->iv = areq_ctx->ctr_iv;
 		}
 		}
 	}  else {
 	}  else {
 		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
 		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
 	}
 	}
 
 
-#if SSI_CC_HAS_AES_CCM
 	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
 	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
 		rc = config_ccm_adata(req);
 		rc = config_ccm_adata(req);
-		if (unlikely(rc != 0)) {
+		if (rc) {
 			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
 			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
 				rc);
 				rc);
 			goto exit;
 			goto exit;
@@ -2038,23 +1955,18 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 	} else {
 	} else {
 		areq_ctx->ccm_hdr_size = ccm_header_size_null;
 		areq_ctx->ccm_hdr_size = ccm_header_size_null;
 	}
 	}
-#else
-	areq_ctx->ccm_hdr_size = ccm_header_size_null;
-#endif /*SSI_CC_HAS_AES_CCM*/
 
 
-#if SSI_CC_HAS_AES_GCM
 	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
 	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
 		rc = config_gcm_context(req);
 		rc = config_gcm_context(req);
-		if (unlikely(rc != 0)) {
+		if (rc) {
 			dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
 			dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
 				rc);
 				rc);
 			goto exit;
 			goto exit;
 		}
 		}
 	}
 	}
-#endif /*SSI_CC_HAS_AES_GCM*/
 
 
-	rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
-	if (unlikely(rc != 0)) {
+	rc = cc_map_aead_request(ctx->drvdata, req);
+	if (rc) {
 		dev_err(dev, "map_request() failed\n");
 		dev_err(dev, "map_request() failed\n");
 		goto exit;
 		goto exit;
 	}
 	}
@@ -2063,74 +1975,77 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 	if (areq_ctx->backup_giv) {
 	if (areq_ctx->backup_giv) {
 		/* set the DMA mapped IV address*/
 		/* set the DMA mapped IV address*/
 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
-			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
-			ssi_req.ivgen_dma_addr_len = 1;
+			cc_req.ivgen_dma_addr[0] =
+				areq_ctx->gen_ctx.iv_dma_addr +
+				CTR_RFC3686_NONCE_SIZE;
+			cc_req.ivgen_dma_addr_len = 1;
 		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
 		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
-			/* In ccm, the IV needs to exist both inside B0 and inside the counter.
-			 * It is also copied to iv_dma_addr for other reasons (like returning
-			 * it to the user).
+			/* In ccm, the IV needs to exist both inside B0 and
+			 * inside the counter.It is also copied to iv_dma_addr
+			 * for other reasons (like returning it to the user).
 			 * So, using 3 (identical) IV outputs.
 			 * So, using 3 (identical) IV outputs.
 			 */
 			 */
-			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
-			ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET          + CCM_BLOCK_IV_OFFSET;
-			ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
-			ssi_req.ivgen_dma_addr_len = 3;
+			cc_req.ivgen_dma_addr[0] =
+				areq_ctx->gen_ctx.iv_dma_addr +
+				CCM_BLOCK_IV_OFFSET;
+			cc_req.ivgen_dma_addr[1] =
+				sg_dma_address(&areq_ctx->ccm_adata_sg) +
+				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
+			cc_req.ivgen_dma_addr[2] =
+				sg_dma_address(&areq_ctx->ccm_adata_sg) +
+				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
+			cc_req.ivgen_dma_addr_len = 3;
 		} else {
 		} else {
-			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
-			ssi_req.ivgen_dma_addr_len = 1;
+			cc_req.ivgen_dma_addr[0] =
+				areq_ctx->gen_ctx.iv_dma_addr;
+			cc_req.ivgen_dma_addr_len = 1;
 		}
 		}
 
 
 		/* set the IV size (8/16 B long)*/
 		/* set the IV size (8/16 B long)*/
-		ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
+		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
 	}
 	}
 
 
 	/* STAT_PHASE_2: Create sequence */
 	/* STAT_PHASE_2: Create sequence */
 
 
 	/* Load MLLI tables to SRAM if necessary */
 	/* Load MLLI tables to SRAM if necessary */
-	ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
+	cc_mlli_to_sram(req, desc, &seq_len);
 
 
 	/*TODO: move seq len by reference */
 	/*TODO: move seq len by reference */
 	switch (ctx->auth_mode) {
 	switch (ctx->auth_mode) {
 	case DRV_HASH_SHA1:
 	case DRV_HASH_SHA1:
 	case DRV_HASH_SHA256:
 	case DRV_HASH_SHA256:
-		ssi_aead_hmac_authenc(req, desc, &seq_len);
+		cc_hmac_authenc(req, desc, &seq_len);
 		break;
 		break;
 	case DRV_HASH_XCBC_MAC:
 	case DRV_HASH_XCBC_MAC:
-		ssi_aead_xcbc_authenc(req, desc, &seq_len);
+		cc_xcbc_authenc(req, desc, &seq_len);
 		break;
 		break;
-#if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
 	case DRV_HASH_NULL:
 	case DRV_HASH_NULL:
-#if SSI_CC_HAS_AES_CCM
 		if (ctx->cipher_mode == DRV_CIPHER_CCM)
 		if (ctx->cipher_mode == DRV_CIPHER_CCM)
-			ssi_aead_ccm(req, desc, &seq_len);
-#endif /*SSI_CC_HAS_AES_CCM*/
-#if SSI_CC_HAS_AES_GCM
+			cc_ccm(req, desc, &seq_len);
 		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
 		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
-			ssi_aead_gcm(req, desc, &seq_len);
-#endif /*SSI_CC_HAS_AES_GCM*/
-			break;
-#endif
+			cc_gcm(req, desc, &seq_len);
+		break;
 	default:
 	default:
 		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
 		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
-		ssi_buffer_mgr_unmap_aead_request(dev, req);
+		cc_unmap_aead_request(dev, req);
 		rc = -ENOTSUPP;
 		rc = -ENOTSUPP;
 		goto exit;
 		goto exit;
 	}
 	}
 
 
 	/* STAT_PHASE_3: Lock HW and push sequence */
 	/* STAT_PHASE_3: Lock HW and push sequence */
 
 
-	rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
 
 
-	if (unlikely(rc != -EINPROGRESS)) {
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-		ssi_buffer_mgr_unmap_aead_request(dev, req);
+		cc_unmap_aead_request(dev, req);
 	}
 	}
 
 
 exit:
 exit:
 	return rc;
 	return rc;
 }
 }
 
 
-static int ssi_aead_encrypt(struct aead_request *req)
+static int cc_aead_encrypt(struct aead_request *req)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
 	int rc;
@@ -2142,21 +2057,20 @@ static int ssi_aead_encrypt(struct aead_request *req)
 
 
 	areq_ctx->plaintext_authenticate_only = false;
 	areq_ctx->plaintext_authenticate_only = false;
 
 
-	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
-	if (rc != -EINPROGRESS)
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
 		req->iv = areq_ctx->backup_iv;
 
 
 	return rc;
 	return rc;
 }
 }
 
 
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
+static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
 {
 {
-	/* Very similar to ssi_aead_encrypt() above. */
+	/* Very similar to cc_aead_encrypt() above. */
 
 
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	int rc = -EINVAL;
 	int rc = -EINVAL;
 
 
@@ -2170,17 +2084,16 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = true;
 	areq_ctx->is_gcm4543 = true;
 
 
-	ssi_rfc4309_ccm_process(req);
+	cc_proc_rfc4309_ccm(req);
 
 
-	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
-	if (rc != -EINPROGRESS)
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
 		req->iv = areq_ctx->backup_iv;
 out:
 out:
 	return rc;
 	return rc;
 }
 }
-#endif /* SSI_CC_HAS_AES_CCM */
 
 
-static int ssi_aead_decrypt(struct aead_request *req)
+static int cc_aead_decrypt(struct aead_request *req)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
 	int rc;
@@ -2192,18 +2105,17 @@ static int ssi_aead_decrypt(struct aead_request *req)
 
 
 	areq_ctx->plaintext_authenticate_only = false;
 	areq_ctx->plaintext_authenticate_only = false;
 
 
-	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
-	if (rc != -EINPROGRESS)
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
 		req->iv = areq_ctx->backup_iv;
 
 
 	return rc;
 	return rc;
 }
 }
 
 
-#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
+static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
 {
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = -EINVAL;
 	int rc = -EINVAL;
@@ -2218,22 +2130,20 @@ static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->backup_giv = NULL;
 
 
 	areq_ctx->is_gcm4543 = true;
 	areq_ctx->is_gcm4543 = true;
-	ssi_rfc4309_ccm_process(req);
+	cc_proc_rfc4309_ccm(req);
 
 
-	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
-	if (rc != -EINPROGRESS)
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
 		req->iv = areq_ctx->backup_iv;
 
 
 out:
 out:
 	return rc;
 	return rc;
 }
 }
-#endif /* SSI_CC_HAS_AES_CCM */
-
-#if SSI_CC_HAS_AES_GCM
 
 
-static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+				 unsigned int keylen)
 {
 {
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
 	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
@@ -2244,12 +2154,13 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
 	keylen -= 4;
 	keylen -= 4;
 	memcpy(ctx->ctr_nonce, key + keylen, 4);
 	memcpy(ctx->ctr_nonce, key + keylen, 4);
 
 
-	return ssi_aead_setkey(tfm, key, keylen);
+	return cc_aead_setkey(tfm, key, keylen);
 }
 }
 
 
-static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+				 unsigned int keylen)
 {
 {
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
 	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
@@ -2260,11 +2171,11 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
 	keylen -= 4;
 	keylen -= 4;
 	memcpy(ctx->ctr_nonce, key + keylen, 4);
 	memcpy(ctx->ctr_nonce, key + keylen, 4);
 
 
-	return ssi_aead_setkey(tfm, key, keylen);
+	return cc_aead_setkey(tfm, key, keylen);
 }
 }
 
 
-static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
-			       unsigned int authsize)
+static int cc_gcm_setauthsize(struct crypto_aead *authenc,
+			      unsigned int authsize)
 {
 {
 	switch (authsize) {
 	switch (authsize) {
 	case 4:
 	case 4:
@@ -2279,13 +2190,13 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	return ssi_aead_setauthsize(authenc, authsize);
+	return cc_aead_setauthsize(authenc, authsize);
 }
 }
 
 
-static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
-				       unsigned int authsize)
+static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+				      unsigned int authsize)
 {
 {
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	dev_dbg(dev, "authsize %d\n", authsize);
 	dev_dbg(dev, "authsize %d\n", authsize);
@@ -2299,13 +2210,13 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	return ssi_aead_setauthsize(authenc, authsize);
+	return cc_aead_setauthsize(authenc, authsize);
 }
 }
 
 
-static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
-				       unsigned int authsize)
+static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+				      unsigned int authsize)
 {
 {
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 	dev_dbg(dev, "authsize %d\n", authsize);
 	dev_dbg(dev, "authsize %d\n", authsize);
@@ -2313,15 +2224,15 @@ static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
 	if (authsize != 16)
 	if (authsize != 16)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	return ssi_aead_setauthsize(authenc, authsize);
+	return cc_aead_setauthsize(authenc, authsize);
 }
 }
 
 
-static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
+static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
 {
 {
-	/* Very similar to ssi_aead_encrypt() above. */
+	/* Very similar to cc_aead_encrypt() above. */
 
 
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = -EINVAL;
 	int rc = -EINVAL;
@@ -2337,19 +2248,19 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
 
 
 	areq_ctx->plaintext_authenticate_only = false;
 	areq_ctx->plaintext_authenticate_only = false;
 
 
-	ssi_rfc4_gcm_process(req);
+	cc_proc_rfc4_gcm(req);
 	areq_ctx->is_gcm4543 = true;
 	areq_ctx->is_gcm4543 = true;
 
 
-	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
-	if (rc != -EINPROGRESS)
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
 		req->iv = areq_ctx->backup_iv;
 out:
 out:
 	return rc;
 	return rc;
 }
 }
 
 
-static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
+static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
 {
 {
-	/* Very similar to ssi_aead_encrypt() above. */
+	/* Very similar to cc_aead_encrypt() above. */
 
 
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
 	int rc;
@@ -2361,22 +2272,22 @@ static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->backup_giv = NULL;
 
 
-	ssi_rfc4_gcm_process(req);
+	cc_proc_rfc4_gcm(req);
 	areq_ctx->is_gcm4543 = true;
 	areq_ctx->is_gcm4543 = true;
 
 
-	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
-	if (rc != -EINPROGRESS)
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
 		req->iv = areq_ctx->backup_iv;
 
 
 	return rc;
 	return rc;
 }
 }
 
 
-static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
+static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
 {
 {
-	/* Very similar to ssi_aead_decrypt() above. */
+	/* Very similar to cc_aead_decrypt() above. */
 
 
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = -EINVAL;
 	int rc = -EINVAL;
@@ -2392,19 +2303,19 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
 
 
 	areq_ctx->plaintext_authenticate_only = false;
 	areq_ctx->plaintext_authenticate_only = false;
 
 
-	ssi_rfc4_gcm_process(req);
+	cc_proc_rfc4_gcm(req);
 	areq_ctx->is_gcm4543 = true;
 	areq_ctx->is_gcm4543 = true;
 
 
-	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
-	if (rc != -EINPROGRESS)
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
 		req->iv = areq_ctx->backup_iv;
 out:
 out:
 	return rc;
 	return rc;
 }
 }
 
 
-static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
+static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
 {
 {
-	/* Very similar to ssi_aead_decrypt() above. */
+	/* Very similar to cc_aead_decrypt() above. */
 
 
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
 	int rc;
@@ -2416,31 +2327,30 @@ static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->backup_giv = NULL;
 
 
-	ssi_rfc4_gcm_process(req);
+	cc_proc_rfc4_gcm(req);
 	areq_ctx->is_gcm4543 = true;
 	areq_ctx->is_gcm4543 = true;
 
 
-	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
-	if (rc != -EINPROGRESS)
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
 		req->iv = areq_ctx->backup_iv;
 
 
 	return rc;
 	return rc;
 }
 }
-#endif /* SSI_CC_HAS_AES_GCM */
 
 
 /* DX Block aead alg */
 /* DX Block aead alg */
-static struct ssi_alg_template aead_algs[] = {
+static struct cc_alg_template aead_algs[] = {
 	{
 	{
 		.name = "authenc(hmac(sha1),cbc(aes))",
 		.name = "authenc(hmac(sha1),cbc(aes))",
 		.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
 		.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_aead_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
 		},
@@ -2454,12 +2364,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_aead_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
 		},
@@ -2473,12 +2383,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_aead_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
 		},
@@ -2492,12 +2402,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_aead_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
 		},
@@ -2511,12 +2421,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_aead_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		},
@@ -2530,12 +2440,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_aead_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
 		},
@@ -2549,12 +2459,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_aead_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
 		},
@@ -2568,12 +2478,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_aead_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		},
@@ -2581,19 +2491,18 @@ static struct ssi_alg_template aead_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_XCBC_MAC,
 		.auth_mode = DRV_HASH_XCBC_MAC,
 	},
 	},
-#if SSI_CC_HAS_AES_CCM
 	{
 	{
 		.name = "ccm(aes)",
 		.name = "ccm(aes)",
 		.driver_name = "ccm-aes-dx",
 		.driver_name = "ccm-aes-dx",
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_ccm_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_ccm_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		},
@@ -2607,12 +2516,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_rfc4309_ccm_setkey,
-			.setauthsize = ssi_rfc4309_ccm_setauthsize,
-			.encrypt = ssi_rfc4309_ccm_encrypt,
-			.decrypt = ssi_rfc4309_ccm_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_rfc4309_ccm_setkey,
+			.setauthsize = cc_rfc4309_ccm_setauthsize,
+			.encrypt = cc_rfc4309_ccm_encrypt,
+			.decrypt = cc_rfc4309_ccm_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = CCM_BLOCK_IV_SIZE,
 			.ivsize = CCM_BLOCK_IV_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		},
@@ -2620,20 +2529,18 @@ static struct ssi_alg_template aead_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_NULL,
 		.auth_mode = DRV_HASH_NULL,
 	},
 	},
-#endif /*SSI_CC_HAS_AES_CCM*/
-#if SSI_CC_HAS_AES_GCM
 	{
 	{
 		.name = "gcm(aes)",
 		.name = "gcm(aes)",
 		.driver_name = "gcm-aes-dx",
 		.driver_name = "gcm-aes-dx",
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_aead_setkey,
-			.setauthsize = ssi_gcm_setauthsize,
-			.encrypt = ssi_aead_encrypt,
-			.decrypt = ssi_aead_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_gcm_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = 12,
 			.ivsize = 12,
 			.maxauthsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		},
@@ -2647,12 +2554,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_rfc4106_gcm_setkey,
-			.setauthsize = ssi_rfc4106_gcm_setauthsize,
-			.encrypt = ssi_rfc4106_gcm_encrypt,
-			.decrypt = ssi_rfc4106_gcm_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_rfc4106_gcm_setkey,
+			.setauthsize = cc_rfc4106_gcm_setauthsize,
+			.encrypt = cc_rfc4106_gcm_encrypt,
+			.decrypt = cc_rfc4106_gcm_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
 			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		},
@@ -2666,12 +2573,12 @@ static struct ssi_alg_template aead_algs[] = {
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.type = CRYPTO_ALG_TYPE_AEAD,
 		.template_aead = {
 		.template_aead = {
-			.setkey = ssi_rfc4543_gcm_setkey,
-			.setauthsize = ssi_rfc4543_gcm_setauthsize,
-			.encrypt = ssi_rfc4543_gcm_encrypt,
-			.decrypt = ssi_rfc4543_gcm_decrypt,
-			.init = ssi_aead_init,
-			.exit = ssi_aead_exit,
+			.setkey = cc_rfc4543_gcm_setkey,
+			.setauthsize = cc_rfc4543_gcm_setauthsize,
+			.encrypt = cc_rfc4543_gcm_encrypt,
+			.decrypt = cc_rfc4543_gcm_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
 			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
 			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
 		},
@@ -2679,52 +2586,51 @@ static struct ssi_alg_template aead_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_NULL,
 		.auth_mode = DRV_HASH_NULL,
 	},
 	},
-#endif /*SSI_CC_HAS_AES_GCM*/
 };
 };
 
 
-static struct ssi_crypto_alg *ssi_aead_create_alg(
-			struct ssi_alg_template *template,
-			struct device *dev)
+static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
+						struct device *dev)
 {
 {
-	struct ssi_crypto_alg *t_alg;
+	struct cc_crypto_alg *t_alg;
 	struct aead_alg *alg;
 	struct aead_alg *alg;
 
 
 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
 	if (!t_alg)
 	if (!t_alg)
 		return ERR_PTR(-ENOMEM);
 		return ERR_PTR(-ENOMEM);
 
 
-	alg = &template->template_aead;
+	alg = &tmpl->template_aead;
 
 
-	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
 	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-		 template->driver_name);
+		 tmpl->driver_name);
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_module = THIS_MODULE;
-	alg->base.cra_priority = SSI_CRA_PRIO;
+	alg->base.cra_priority = CC_CRA_PRIO;
 
 
-	alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
+	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-			 template->type;
-	alg->init = ssi_aead_init;
-	alg->exit = ssi_aead_exit;
+			 tmpl->type;
+	alg->init = cc_aead_init;
+	alg->exit = cc_aead_exit;
 
 
 	t_alg->aead_alg = *alg;
 	t_alg->aead_alg = *alg;
 
 
-	t_alg->cipher_mode = template->cipher_mode;
-	t_alg->flow_mode = template->flow_mode;
-	t_alg->auth_mode = template->auth_mode;
+	t_alg->cipher_mode = tmpl->cipher_mode;
+	t_alg->flow_mode = tmpl->flow_mode;
+	t_alg->auth_mode = tmpl->auth_mode;
 
 
 	return t_alg;
 	return t_alg;
 }
 }
 
 
-int ssi_aead_free(struct ssi_drvdata *drvdata)
+int cc_aead_free(struct cc_drvdata *drvdata)
 {
 {
-	struct ssi_crypto_alg *t_alg, *n;
-	struct ssi_aead_handle *aead_handle =
-		(struct ssi_aead_handle *)drvdata->aead_handle;
+	struct cc_crypto_alg *t_alg, *n;
+	struct cc_aead_handle *aead_handle =
+		(struct cc_aead_handle *)drvdata->aead_handle;
 
 
 	if (aead_handle) {
 	if (aead_handle) {
 		/* Remove registered algs */
 		/* Remove registered algs */
-		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
+		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
+					 entry) {
 			crypto_unregister_aead(&t_alg->aead_alg);
 			crypto_unregister_aead(&t_alg->aead_alg);
 			list_del(&t_alg->entry);
 			list_del(&t_alg->entry);
 			kfree(t_alg);
 			kfree(t_alg);
@@ -2736,10 +2642,10 @@ int ssi_aead_free(struct ssi_drvdata *drvdata)
 	return 0;
 	return 0;
 }
 }
 
 
-int ssi_aead_alloc(struct ssi_drvdata *drvdata)
+int cc_aead_alloc(struct cc_drvdata *drvdata)
 {
 {
-	struct ssi_aead_handle *aead_handle;
-	struct ssi_crypto_alg *t_alg;
+	struct cc_aead_handle *aead_handle;
+	struct cc_crypto_alg *t_alg;
 	int rc = -ENOMEM;
 	int rc = -ENOMEM;
 	int alg;
 	int alg;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
@@ -2753,8 +2659,9 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
 	INIT_LIST_HEAD(&aead_handle->aead_list);
 	INIT_LIST_HEAD(&aead_handle->aead_list);
 	drvdata->aead_handle = aead_handle;
 	drvdata->aead_handle = aead_handle;
 
 
-	aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
-		drvdata, MAX_HMAC_DIGEST_SIZE);
+	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+							 MAX_HMAC_DIGEST_SIZE);
+
 	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
 	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
 		dev_err(dev, "SRAM pool exhausted\n");
 		dev_err(dev, "SRAM pool exhausted\n");
 		rc = -ENOMEM;
 		rc = -ENOMEM;
@@ -2763,7 +2670,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
 
 
 	/* Linux crypto */
 	/* Linux crypto */
 	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
 	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
-		t_alg = ssi_aead_create_alg(&aead_algs[alg], dev);
+		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
 		if (IS_ERR(t_alg)) {
 		if (IS_ERR(t_alg)) {
 			rc = PTR_ERR(t_alg);
 			rc = PTR_ERR(t_alg);
 			dev_err(dev, "%s alg allocation failed\n",
 			dev_err(dev, "%s alg allocation failed\n",
@@ -2772,7 +2679,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
 		}
 		}
 		t_alg->drvdata = drvdata;
 		t_alg->drvdata = drvdata;
 		rc = crypto_register_aead(&t_alg->aead_alg);
 		rc = crypto_register_aead(&t_alg->aead_alg);
-		if (unlikely(rc != 0)) {
+		if (rc) {
 			dev_err(dev, "%s alg registration failed\n",
 			dev_err(dev, "%s alg registration failed\n",
 				t_alg->aead_alg.base.cra_driver_name);
 				t_alg->aead_alg.base.cra_driver_name);
 			goto fail2;
 			goto fail2;
@@ -2788,7 +2695,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
 fail2:
 fail2:
 	kfree(t_alg);
 	kfree(t_alg);
 fail1:
 fail1:
-	ssi_aead_free(drvdata);
+	cc_aead_free(drvdata);
 fail0:
 fail0:
 	return rc;
 	return rc;
 }
 }

+ 24 - 32
drivers/staging/ccree/ssi_aead.h → drivers/staging/ccree/cc_aead.h

@@ -1,25 +1,12 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
 
 
-/* \file ssi_aead.h
+/* \file cc_aead.h
  * ARM CryptoCell AEAD Crypto API
  * ARM CryptoCell AEAD Crypto API
  */
  */
 
 
-#ifndef __SSI_AEAD_H__
-#define __SSI_AEAD_H__
+#ifndef __CC_AEAD_H__
+#define __CC_AEAD_H__
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <crypto/algapi.h>
 #include <crypto/algapi.h>
@@ -28,7 +15,7 @@
 /* mac_cmp - HW writes 8 B but all bytes hold the same value */
 /* mac_cmp - HW writes 8 B but all bytes hold the same value */
 #define ICV_CMP_SIZE 8
 #define ICV_CMP_SIZE 8
 #define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
 #define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
-#define MAX_MAC_SIZE MAX(SHA256_DIGEST_SIZE, AES_BLOCK_SIZE)
+#define MAX_MAC_SIZE SHA256_DIGEST_SIZE
 
 
 /* defines for AES GCM configuration buffer */
 /* defines for AES GCM configuration buffer */
 #define GCM_BLOCK_LEN_SIZE 8
 #define GCM_BLOCK_LEN_SIZE 8
@@ -74,32 +61,37 @@ struct aead_req_ctx {
 	} gcm_len_block;
 	} gcm_len_block;
 
 
 	u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
 	u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
-	unsigned int hw_iv_size ____cacheline_aligned; /*HW actual size input*/
-	u8 backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
+	/* HW actual size input */
+	unsigned int hw_iv_size ____cacheline_aligned;
+	/* used to prevent cache coherence problem */
+	u8 backup_mac[MAX_MAC_SIZE];
 	u8 *backup_iv; /*store iv for generated IV flow*/
 	u8 *backup_iv; /*store iv for generated IV flow*/
 	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
 	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
 	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
 	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
-	dma_addr_t ccm_iv0_dma_addr; /* buffer for internal ccm configurations */
+	/* buffer for internal ccm configurations */
+	dma_addr_t ccm_iv0_dma_addr;
 	dma_addr_t icv_dma_addr; /* Phys. address of ICV */
 	dma_addr_t icv_dma_addr; /* Phys. address of ICV */
 
 
 	//used in gcm
 	//used in gcm
-	dma_addr_t gcm_iv_inc1_dma_addr; /* buffer for internal gcm configurations */
-	dma_addr_t gcm_iv_inc2_dma_addr; /* buffer for internal gcm configurations */
+	/* buffer for internal gcm configurations */
+	dma_addr_t gcm_iv_inc1_dma_addr;
+	/* buffer for internal gcm configurations */
+	dma_addr_t gcm_iv_inc2_dma_addr;
 	dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
 	dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
 	dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
 	dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
 	bool is_gcm4543;
 	bool is_gcm4543;
 
 
 	u8 *icv_virt_addr; /* Virt. address of ICV */
 	u8 *icv_virt_addr; /* Virt. address of ICV */
 	struct async_gen_req_ctx gen_ctx;
 	struct async_gen_req_ctx gen_ctx;
-	struct ssi_mlli assoc;
-	struct ssi_mlli src;
-	struct ssi_mlli dst;
+	struct cc_mlli assoc;
+	struct cc_mlli src;
+	struct cc_mlli dst;
 	struct scatterlist *src_sgl;
 	struct scatterlist *src_sgl;
 	struct scatterlist *dst_sgl;
 	struct scatterlist *dst_sgl;
 	unsigned int src_offset;
 	unsigned int src_offset;
 	unsigned int dst_offset;
 	unsigned int dst_offset;
-	enum ssi_req_dma_buf_type assoc_buff_type;
-	enum ssi_req_dma_buf_type data_buff_type;
+	enum cc_req_dma_buf_type assoc_buff_type;
+	enum cc_req_dma_buf_type data_buff_type;
 	struct mlli_params mlli_params;
 	struct mlli_params mlli_params;
 	unsigned int cryptlen;
 	unsigned int cryptlen;
 	struct scatterlist ccm_adata_sg;
 	struct scatterlist ccm_adata_sg;
@@ -111,7 +103,7 @@ struct aead_req_ctx {
 	bool plaintext_authenticate_only; //for gcm_rfc4543
 	bool plaintext_authenticate_only; //for gcm_rfc4543
 };
 };
 
 
-int ssi_aead_alloc(struct ssi_drvdata *drvdata);
-int ssi_aead_free(struct ssi_drvdata *drvdata);
+int cc_aead_alloc(struct cc_drvdata *drvdata);
+int cc_aead_free(struct cc_drvdata *drvdata);
 
 
-#endif /*__SSI_AEAD_H__*/
+#endif /*__CC_AEAD_H__*/

+ 480 - 605
drivers/staging/ccree/ssi_buffer_mgr.c → drivers/staging/ccree/cc_buffer_mgr.c

@@ -1,42 +1,17 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
 
 
-#include <linux/crypto.h>
-#include <linux/version.h>
-#include <crypto/algapi.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/aead.h>
-#include <crypto/hash.h>
 #include <crypto/authenc.h>
 #include <crypto/authenc.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/scatterwalk.h>
 #include <linux/dmapool.h>
 #include <linux/dmapool.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-mapping.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
 
 
-#include "ssi_buffer_mgr.h"
+#include "cc_buffer_mgr.h"
 #include "cc_lli_defs.h"
 #include "cc_lli_defs.h"
-#include "ssi_cipher.h"
-#include "ssi_hash.h"
-#include "ssi_aead.h"
-
-#define GET_DMA_BUFFER_TYPE(buff_type) ( \
-	((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
-	((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
-	((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
+#include "cc_cipher.h"
+#include "cc_hash.h"
+#include "cc_aead.h"
 
 
 enum dma_buffer_type {
 enum dma_buffer_type {
 	DMA_NULL_TYPE = -1,
 	DMA_NULL_TYPE = -1,
@@ -64,25 +39,62 @@ struct buffer_array {
 	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 };
 };
 
 
+static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
+{
+	switch (type) {
+	case CC_DMA_BUF_NULL:
+		return "BUF_NULL";
+	case CC_DMA_BUF_DLLI:
+		return "BUF_DLLI";
+	case CC_DMA_BUF_MLLI:
+		return "BUF_MLLI";
+	default:
+		return "BUF_INVALID";
+	}
+}
+
 /**
 /**
- * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
+ * cc_copy_mac() - Copy MAC to temporary location
+ *
+ * @dev: device object
+ * @req: aead request object
+ * @dir: [IN] copy from/to sgl
+ */
+static void cc_copy_mac(struct device *dev, struct aead_request *req,
+			enum cc_sg_cpy_direct dir)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	u32 skip = req->assoclen + req->cryptlen;
+
+	if (areq_ctx->is_gcm4543)
+		skip += crypto_aead_ivsize(tfm);
+
+	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+			   (skip - areq_ctx->req_authsize), skip, dir);
+}
+
+/**
+ * cc_get_sgl_nents() - Get scatterlist number of entries.
  *
  *
  * @sg_list: SG list
  * @sg_list: SG list
  * @nbytes: [IN] Total SGL data bytes.
  * @nbytes: [IN] Total SGL data bytes.
  * @lbytes: [OUT] Returns the amount of bytes at the last entry
  * @lbytes: [OUT] Returns the amount of bytes at the last entry
  */
  */
-static unsigned int ssi_buffer_mgr_get_sgl_nents(
-	struct device *dev, struct scatterlist *sg_list,
-	unsigned int nbytes, u32 *lbytes, bool *is_chained)
+static unsigned int cc_get_sgl_nents(struct device *dev,
+				     struct scatterlist *sg_list,
+				     unsigned int nbytes, u32 *lbytes,
+				     bool *is_chained)
 {
 {
 	unsigned int nents = 0;
 	unsigned int nents = 0;
 
 
-	while (nbytes != 0) {
-		if (sg_list->length != 0) {
+	while (nbytes && sg_list) {
+		if (sg_list->length) {
 			nents++;
 			nents++;
 			/* get the number of bytes in the last entry */
 			/* get the number of bytes in the last entry */
 			*lbytes = nbytes;
 			*lbytes = nbytes;
-			nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
+			nbytes -= (sg_list->length > nbytes) ?
+					nbytes : sg_list->length;
 			sg_list = sg_next(sg_list);
 			sg_list = sg_next(sg_list);
 		} else {
 		} else {
 			sg_list = (struct scatterlist *)sg_page(sg_list);
 			sg_list = (struct scatterlist *)sg_page(sg_list);
@@ -95,11 +107,11 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
 }
 }
 
 
 /**
 /**
- * ssi_buffer_mgr_zero_sgl() - Zero scatter scatter list data.
+ * cc_zero_sgl() - Zero scatter scatter list data.
  *
  *
  * @sgl:
  * @sgl:
  */
  */
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
 {
 {
 	struct scatterlist *current_sg = sgl;
 	struct scatterlist *current_sg = sgl;
 	int sg_index = 0;
 	int sg_index = 0;
@@ -116,7 +128,7 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
 }
 }
 
 
 /**
 /**
- * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
+ * cc_copy_sg_portion() - Copy scatter list data,
  * from to_skip to end, to dest and vice versa
  * from to_skip to end, to dest and vice versa
  *
  *
  * @dest:
  * @dest:
@@ -125,21 +137,19 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
  * @end:
  * @end:
  * @direct:
  * @direct:
  */
  */
-void ssi_buffer_mgr_copy_scatterlist_portion(
-	struct device *dev, u8 *dest,
-	struct scatterlist *sg, u32 to_skip,
-	u32 end, enum ssi_sg_cpy_direct direct)
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 {
 {
 	u32 nents, lbytes;
 	u32 nents, lbytes;
 
 
-	nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
 	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
-		       (direct == SSI_SG_TO_BUF));
+		       (direct == CC_SG_TO_BUF));
 }
 }
 
 
-static inline int ssi_buffer_mgr_render_buff_to_mlli(
-	struct device *dev, dma_addr_t buff_dma, u32 buff_size,
-	u32 *curr_nents, u32 **mlli_entry_pp)
+static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
+				  u32 buff_size, u32 *curr_nents,
+				  u32 **mlli_entry_pp)
 {
 {
 	u32 *mlli_entry_p = *mlli_entry_pp;
 	u32 *mlli_entry_p = *mlli_entry_pp;
 	u32 new_nents;
 	u32 new_nents;
@@ -173,26 +183,25 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
 	return 0;
 	return 0;
 }
 }
 
 
-static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
-	struct device *dev, struct scatterlist *sgl,
-	u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents,
-	u32 **mlli_entry_pp)
+static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
+				u32 sgl_data_len, u32 sgl_offset,
+				u32 *curr_nents, u32 **mlli_entry_pp)
 {
 {
 	struct scatterlist *curr_sgl = sgl;
 	struct scatterlist *curr_sgl = sgl;
 	u32 *mlli_entry_p = *mlli_entry_pp;
 	u32 *mlli_entry_p = *mlli_entry_pp;
 	s32 rc = 0;
 	s32 rc = 0;
 
 
-	for ( ; (curr_sgl) && (sgl_data_len != 0);
+	for ( ; (curr_sgl && sgl_data_len);
 	      curr_sgl = sg_next(curr_sgl)) {
 	      curr_sgl = sg_next(curr_sgl)) {
 		u32 entry_data_len =
 		u32 entry_data_len =
 			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 				sg_dma_len(curr_sgl) - sgl_offset :
 				sg_dma_len(curr_sgl) - sgl_offset :
 				sgl_data_len;
 				sgl_data_len;
 		sgl_data_len -= entry_data_len;
 		sgl_data_len -= entry_data_len;
-		rc = ssi_buffer_mgr_render_buff_to_mlli(
-			dev, sg_dma_address(curr_sgl) + sgl_offset,
-			entry_data_len, curr_nents, &mlli_entry_p);
-		if (rc != 0)
+		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
+					    sgl_offset, entry_data_len,
+					    curr_nents, &mlli_entry_p);
+		if (rc)
 			return rc;
 			return rc;
 
 
 		sgl_offset = 0;
 		sgl_offset = 0;
@@ -201,10 +210,8 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
 	return 0;
 	return 0;
 }
 }
 
 
-static int ssi_buffer_mgr_generate_mlli(
-	struct device *dev,
-	struct buffer_array *sg_data,
-	struct mlli_params *mlli_params)
+static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
+			    struct mlli_params *mlli_params, gfp_t flags)
 {
 {
 	u32 *mlli_p;
 	u32 *mlli_p;
 	u32 total_nents = 0, prev_total_nents = 0;
 	u32 total_nents = 0, prev_total_nents = 0;
@@ -213,10 +220,10 @@ static int ssi_buffer_mgr_generate_mlli(
 	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 
 
 	/* Allocate memory from the pointed pool */
 	/* Allocate memory from the pointed pool */
-	mlli_params->mlli_virt_addr = dma_pool_alloc(
-			mlli_params->curr_pool, GFP_KERNEL,
-			&mlli_params->mlli_dma_addr);
-	if (unlikely(!mlli_params->mlli_virt_addr)) {
+	mlli_params->mlli_virt_addr =
+		dma_pool_alloc(mlli_params->curr_pool, flags,
+			       &mlli_params->mlli_dma_addr);
+	if (!mlli_params->mlli_virt_addr) {
 		dev_err(dev, "dma_pool_alloc() failed\n");
 		dev_err(dev, "dma_pool_alloc() failed\n");
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto build_mlli_exit;
 		goto build_mlli_exit;
@@ -225,17 +232,19 @@ static int ssi_buffer_mgr_generate_mlli(
 	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
 	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
 	/* go over all SG's and link it to one MLLI table */
 	/* go over all SG's and link it to one MLLI table */
 	for (i = 0; i < sg_data->num_of_buffers; i++) {
 	for (i = 0; i < sg_data->num_of_buffers; i++) {
+		union buffer_array_entry *entry = &sg_data->entry[i];
+		u32 tot_len = sg_data->total_data_len[i];
+		u32 offset = sg_data->offset[i];
+
 		if (sg_data->type[i] == DMA_SGL_TYPE)
 		if (sg_data->type[i] == DMA_SGL_TYPE)
-			rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
-				dev, sg_data->entry[i].sgl,
-				sg_data->total_data_len[i], sg_data->offset[i],
-				&total_nents, &mlli_p);
+			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
+						  offset, &total_nents,
+						  &mlli_p);
 		else /*DMA_BUFF_TYPE*/
 		else /*DMA_BUFF_TYPE*/
-			rc = ssi_buffer_mgr_render_buff_to_mlli(
-				dev, sg_data->entry[i].buffer_dma,
-				sg_data->total_data_len[i], &total_nents,
-				&mlli_p);
-		if (rc != 0)
+			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
+						    tot_len, &total_nents,
+						    &mlli_p);
+		if (rc)
 			return rc;
 			return rc;
 
 
 		/* set last bit in the current table */
 		/* set last bit in the current table */
@@ -260,10 +269,10 @@ build_mlli_exit:
 	return rc;
 	return rc;
 }
 }
 
 
-static inline void ssi_buffer_mgr_add_buffer_entry(
-	struct device *dev, struct buffer_array *sgl_data,
-	dma_addr_t buffer_dma, unsigned int buffer_len,
-	bool is_last_entry, u32 *mlli_nents)
+static void cc_add_buffer_entry(struct device *dev,
+				struct buffer_array *sgl_data,
+				dma_addr_t buffer_dma, unsigned int buffer_len,
+				bool is_last_entry, u32 *mlli_nents)
 {
 {
 	unsigned int index = sgl_data->num_of_buffers;
 	unsigned int index = sgl_data->num_of_buffers;
 
 
@@ -281,15 +290,10 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
 	sgl_data->num_of_buffers++;
 	sgl_data->num_of_buffers++;
 }
 }
 
 
-static inline void ssi_buffer_mgr_add_scatterlist_entry(
-	struct device *dev,
-	struct buffer_array *sgl_data,
-	unsigned int nents,
-	struct scatterlist *sgl,
-	unsigned int data_len,
-	unsigned int data_offset,
-	bool is_last_table,
-	u32 *mlli_nents)
+static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
+			    unsigned int nents, struct scatterlist *sgl,
+			    unsigned int data_len, unsigned int data_offset,
+			    bool is_last_table, u32 *mlli_nents)
 {
 {
 	unsigned int index = sgl_data->num_of_buffers;
 	unsigned int index = sgl_data->num_of_buffers;
 
 
@@ -307,9 +311,8 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
 	sgl_data->num_of_buffers++;
 	sgl_data->num_of_buffers++;
 }
 }
 
 
-static int
-ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-			  enum dma_data_direction direction)
+static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
+			 enum dma_data_direction direction)
 {
 {
 	u32 i, j;
 	u32 i, j;
 	struct scatterlist *l_sg = sg;
 	struct scatterlist *l_sg = sg;
@@ -317,7 +320,7 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
 	for (i = 0; i < nents; i++) {
 	for (i = 0; i < nents; i++) {
 		if (!l_sg)
 		if (!l_sg)
 			break;
 			break;
-		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
+		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
 			dev_err(dev, "dma_map_page() sg buffer failed\n");
 			dev_err(dev, "dma_map_page() sg buffer failed\n");
 			goto err;
 			goto err;
 		}
 		}
@@ -336,17 +339,15 @@ err:
 	return 0;
 	return 0;
 }
 }
 
 
-static int ssi_buffer_mgr_map_scatterlist(
-	struct device *dev, struct scatterlist *sg,
-	unsigned int nbytes, int direction,
-	u32 *nents, u32 max_sg_nents,
-	u32 *lbytes, u32 *mapped_nents)
+static int cc_map_sg(struct device *dev, struct scatterlist *sg,
+		     unsigned int nbytes, int direction, u32 *nents,
+		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
 {
 	bool is_chained = false;
 	bool is_chained = false;
 
 
 	if (sg_is_last(sg)) {
 	if (sg_is_last(sg)) {
 		/* One entry only case -set to DLLI */
 		/* One entry only case -set to DLLI */
-		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
+		if (dma_map_sg(dev, sg, 1, direction) != 1) {
 			dev_err(dev, "dma_map_sg() single buffer failed\n");
 			dev_err(dev, "dma_map_sg() single buffer failed\n");
 			return -ENOMEM;
 			return -ENOMEM;
 		}
 		}
@@ -357,8 +358,8 @@ static int ssi_buffer_mgr_map_scatterlist(
 		*nents = 1;
 		*nents = 1;
 		*mapped_nents = 1;
 		*mapped_nents = 1;
 	} else {  /*sg_is_last*/
 	} else {  /*sg_is_last*/
-		*nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, nbytes, lbytes,
-						      &is_chained);
+		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
+					  &is_chained);
 		if (*nents > max_sg_nents) {
 		if (*nents > max_sg_nents) {
 			*nents = 0;
 			*nents = 0;
 			dev_err(dev, "Too many fragments. current %d max %d\n",
 			dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -370,7 +371,7 @@ static int ssi_buffer_mgr_map_scatterlist(
 			 * be changed from the original sgl nents
 			 * be changed from the original sgl nents
 			 */
 			 */
 			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
 			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-			if (unlikely(*mapped_nents == 0)) {
+			if (*mapped_nents == 0) {
 				*nents = 0;
 				*nents = 0;
 				dev_err(dev, "dma_map_sg() sg buffer failed\n");
 				dev_err(dev, "dma_map_sg() sg buffer failed\n");
 				return -ENOMEM;
 				return -ENOMEM;
@@ -379,11 +380,9 @@ static int ssi_buffer_mgr_map_scatterlist(
 			/*In this case the driver maps entry by entry so it
 			/*In this case the driver maps entry by entry so it
 			 * must have the same nents before and after map
 			 * must have the same nents before and after map
 			 */
 			 */
-			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
-								  sg,
-								  *nents,
-								  direction);
-			if (unlikely(*mapped_nents != *nents)) {
+			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
+						      direction);
+			if (*mapped_nents != *nents) {
 				*nents = *mapped_nents;
 				*nents = *mapped_nents;
 				dev_err(dev, "dma_map_sg() sg buffer failed\n");
 				dev_err(dev, "dma_map_sg() sg buffer failed\n");
 				return -ENOMEM;
 				return -ENOMEM;
@@ -394,18 +393,16 @@ static int ssi_buffer_mgr_map_scatterlist(
 	return 0;
 	return 0;
 }
 }
 
 
-static inline int
-ssi_aead_handle_config_buf(struct device *dev,
-			   struct aead_req_ctx *areq_ctx,
-			   u8 *config_data,
-			   struct buffer_array *sg_data,
-			   unsigned int assoclen)
+static int
+cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
+		     u8 *config_data, struct buffer_array *sg_data,
+		     unsigned int assoclen)
 {
 {
 	dev_dbg(dev, " handle additional data config set to DLLI\n");
 	dev_dbg(dev, " handle additional data config set to DLLI\n");
 	/* create sg for the current buffer */
 	/* create sg for the current buffer */
-	sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
-	if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
-				DMA_TO_DEVICE) != 1)) {
+	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
+		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
+	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 		dev_err(dev, "dma_map_sg() config buffer failed\n");
 		dev_err(dev, "dma_map_sg() config buffer failed\n");
 		return -ENOMEM;
 		return -ENOMEM;
 	}
 	}
@@ -416,25 +413,21 @@ ssi_aead_handle_config_buf(struct device *dev,
 		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 	/* prepare for case of MLLI */
 	/* prepare for case of MLLI */
 	if (assoclen > 0) {
 	if (assoclen > 0) {
-		ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1,
-						     &areq_ctx->ccm_adata_sg,
-						     (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
-						     0, false, NULL);
+		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
+				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+				0, false, NULL);
 	}
 	}
 	return 0;
 	return 0;
 }
 }
 
 
-static inline int ssi_ahash_handle_curr_buf(struct device *dev,
-					    struct ahash_req_ctx *areq_ctx,
-					    u8 *curr_buff,
-					    u32 curr_buff_cnt,
-					    struct buffer_array *sg_data)
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+			   u8 *curr_buff, u32 curr_buff_cnt,
+			   struct buffer_array *sg_data)
 {
 {
 	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 	/* create sg for the current buffer */
 	/* create sg for the current buffer */
 	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
-	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
-				DMA_TO_DEVICE) != 1)) {
+	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 		dev_err(dev, "dma_map_sg() src buffer failed\n");
 		dev_err(dev, "dma_map_sg() src buffer failed\n");
 		return -ENOMEM;
 		return -ENOMEM;
 	}
 	}
@@ -442,25 +435,22 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
 		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 		areq_ctx->buff_sg->length);
 		areq_ctx->buff_sg->length);
-	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 	areq_ctx->curr_sg = areq_ctx->buff_sg;
 	areq_ctx->curr_sg = areq_ctx->buff_sg;
 	areq_ctx->in_nents = 0;
 	areq_ctx->in_nents = 0;
 	/* prepare for case of MLLI */
 	/* prepare for case of MLLI */
-	ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1, areq_ctx->buff_sg,
-					     curr_buff_cnt, 0, false, NULL);
+	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+			false, NULL);
 	return 0;
 	return 0;
 }
 }
 
 
-void ssi_buffer_mgr_unmap_blkcipher_request(
-	struct device *dev,
-	void *ctx,
-	unsigned int ivsize,
-	struct scatterlist *src,
-	struct scatterlist *dst)
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+				unsigned int ivsize, struct scatterlist *src,
+				struct scatterlist *dst)
 {
 {
 	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
 	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
 
 
-	if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
+	if (req_ctx->gen_ctx.iv_dma_addr) {
 		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
@@ -469,7 +459,8 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
 				 DMA_TO_DEVICE);
 				 DMA_TO_DEVICE);
 	}
 	}
 	/* Release pool */
 	/* Release pool */
-	if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+	    req_ctx->mlli_params.mlli_virt_addr) {
 		dma_pool_free(req_ctx->mlli_params.curr_pool,
 		dma_pool_free(req_ctx->mlli_params.curr_pool,
 			      req_ctx->mlli_params.mlli_virt_addr,
 			      req_ctx->mlli_params.mlli_virt_addr,
 			      req_ctx->mlli_params.mlli_dma_addr);
 			      req_ctx->mlli_params.mlli_dma_addr);
@@ -484,14 +475,10 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
 	}
 	}
 }
 }
 
 
-int ssi_buffer_mgr_map_blkcipher_request(
-	struct ssi_drvdata *drvdata,
-	void *ctx,
-	unsigned int ivsize,
-	unsigned int nbytes,
-	void *info,
-	struct scatterlist *src,
-	struct scatterlist *dst)
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+			     unsigned int ivsize, unsigned int nbytes,
+			     void *info, struct scatterlist *src,
+			     struct scatterlist *dst, gfp_t flags)
 {
 {
 	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
 	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
 	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 	struct mlli_params *mlli_params = &req_ctx->mlli_params;
@@ -502,20 +489,19 @@ int ssi_buffer_mgr_map_blkcipher_request(
 	int rc = 0;
 	int rc = 0;
 	u32 mapped_nents = 0;
 	u32 mapped_nents = 0;
 
 
-	req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
+	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 	mlli_params->curr_pool = NULL;
 	mlli_params->curr_pool = NULL;
 	sg_data.num_of_buffers = 0;
 	sg_data.num_of_buffers = 0;
 
 
 	/* Map IV buffer */
 	/* Map IV buffer */
-	if (likely(ivsize != 0)) {
+	if (ivsize) {
 		dump_byte_array("iv", (u8 *)info, ivsize);
 		dump_byte_array("iv", (u8 *)info, ivsize);
 		req_ctx->gen_ctx.iv_dma_addr =
 		req_ctx->gen_ctx.iv_dma_addr =
 			dma_map_single(dev, (void *)info,
 			dma_map_single(dev, (void *)info,
 				       ivsize,
 				       ivsize,
 				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
 				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
 				       DMA_TO_DEVICE);
 				       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev,
-					       req_ctx->gen_ctx.iv_dma_addr))) {
+		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 				ivsize, info);
 				ivsize, info);
 			return -ENOMEM;
 			return -ENOMEM;
@@ -527,121 +513,107 @@ int ssi_buffer_mgr_map_blkcipher_request(
 	}
 	}
 
 
 	/* Map the src SGL */
 	/* Map the src SGL */
-	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
-					    nbytes, DMA_BIDIRECTIONAL,
-					    &req_ctx->in_nents,
-					    LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-					    &mapped_nents);
-	if (unlikely(rc != 0)) {
+	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+	if (rc) {
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto ablkcipher_exit;
 		goto ablkcipher_exit;
 	}
 	}
 	if (mapped_nents > 1)
 	if (mapped_nents > 1)
-		req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
+		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 
 
-	if (unlikely(src == dst)) {
+	if (src == dst) {
 		/* Handle inplace operation */
 		/* Handle inplace operation */
-		if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 			req_ctx->out_nents = 0;
 			req_ctx->out_nents = 0;
-			ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
-							     req_ctx->in_nents,
-							     src, nbytes, 0,
-							     true,
-							     &req_ctx->in_mlli_nents);
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
 		}
 		}
 	} else {
 	} else {
 		/* Map the dst sg */
 		/* Map the dst sg */
-		if (unlikely(ssi_buffer_mgr_map_scatterlist(
-			dev, dst, nbytes,
-			DMA_BIDIRECTIONAL, &req_ctx->out_nents,
-			LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-			&mapped_nents))){
+		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			      &dummy, &mapped_nents)) {
 			rc = -ENOMEM;
 			rc = -ENOMEM;
 			goto ablkcipher_exit;
 			goto ablkcipher_exit;
 		}
 		}
 		if (mapped_nents > 1)
 		if (mapped_nents > 1)
-			req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
-
-		if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
-			ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
-							     req_ctx->in_nents,
-							     src, nbytes, 0,
-							     true,
-							     &req_ctx->in_mlli_nents);
-			ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
-							     req_ctx->out_nents,
-							     dst, nbytes, 0,
-							     true,
-							     &req_ctx->out_mlli_nents);
+			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+					nbytes, 0, true,
+					&req_ctx->out_mlli_nents);
 		}
 		}
 	}
 	}
 
 
-	if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
-		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
-		if (unlikely(rc != 0))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto ablkcipher_exit;
 			goto ablkcipher_exit;
 	}
 	}
 
 
 	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
-		GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
+		cc_dma_buf_type(req_ctx->dma_buf_type));
 
 
 	return 0;
 	return 0;
 
 
 ablkcipher_exit:
 ablkcipher_exit:
-	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+	cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
 	return rc;
 	return rc;
 }
 }
 
 
-void ssi_buffer_mgr_unmap_aead_request(
-	struct device *dev, struct aead_request *req)
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 	u32 dummy;
 	u32 dummy;
 	bool chained;
 	bool chained;
 	u32 size_to_unmap = 0;
 	u32 size_to_unmap = 0;
 
 
-	if (areq_ctx->mac_buf_dma_addr != 0) {
+	if (areq_ctx->mac_buf_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 	}
 	}
 
 
-#if SSI_CC_HAS_AES_GCM
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
-		if (areq_ctx->hkey_dma_addr != 0) {
+		if (areq_ctx->hkey_dma_addr) {
 			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 		}
 		}
 
 
-		if (areq_ctx->gcm_block_len_dma_addr != 0) {
+		if (areq_ctx->gcm_block_len_dma_addr) {
 			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		}
 		}
 
 
-		if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
+		if (areq_ctx->gcm_iv_inc1_dma_addr) {
 			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		}
 		}
 
 
-		if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
+		if (areq_ctx->gcm_iv_inc2_dma_addr) {
 			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		}
 		}
 	}
 	}
-#endif
 
 
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
-		if (areq_ctx->ccm_iv0_dma_addr != 0) {
+		if (areq_ctx->ccm_iv0_dma_addr) {
 			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		}
 		}
 
 
 		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 	}
 	}
-	if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
+	if (areq_ctx->gen_ctx.iv_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
 				 hw_iv_size, DMA_BIDIRECTIONAL);
 	}
 	}
@@ -668,46 +640,37 @@ void ssi_buffer_mgr_unmap_aead_request(
 		size_to_unmap += crypto_aead_ivsize(tfm);
 		size_to_unmap += crypto_aead_ivsize(tfm);
 
 
 	dma_unmap_sg(dev, req->src,
 	dma_unmap_sg(dev, req->src,
-		     ssi_buffer_mgr_get_sgl_nents(dev, req->src, size_to_unmap,
-						  &dummy, &chained),
+		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
+				      &dummy, &chained),
 		     DMA_BIDIRECTIONAL);
 		     DMA_BIDIRECTIONAL);
-	if (unlikely(req->src != req->dst)) {
+	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
 			sg_virt(req->dst));
 		dma_unmap_sg(dev, req->dst,
 		dma_unmap_sg(dev, req->dst,
-			     ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
-							  size_to_unmap,
-							  &dummy, &chained),
+			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
+					      &dummy, &chained),
 			     DMA_BIDIRECTIONAL);
 			     DMA_BIDIRECTIONAL);
 	}
 	}
 	if (drvdata->coherent &&
 	if (drvdata->coherent &&
-	    (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
-	    likely(req->src == req->dst)) {
-		u32 size_to_skip = req->assoclen;
-
-		if (areq_ctx->is_gcm4543)
-			size_to_skip += crypto_aead_ivsize(tfm);
-
-		/* copy mac to a temporary location to deal with possible
-		 * data memory overriding that caused by cache coherence problem.
+	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+	    req->src == req->dst) {
+		/* copy back mac from temporary location to deal with possible
+		 * data memory overriding that caused by cache coherence
+		 * problem.
 		 */
 		 */
-		ssi_buffer_mgr_copy_scatterlist_portion(
-			dev, areq_ctx->backup_mac, req->src,
-			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
-			size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
+		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 	}
 	}
 }
 }
 
 
-static inline int ssi_buffer_mgr_get_aead_icv_nents(
-	struct device *dev,
-	struct scatterlist *sgl,
-	unsigned int sgl_nents,
-	unsigned int authsize,
-	u32 last_entry_data_size,
-	bool *is_icv_fragmented)
+static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
+				 unsigned int sgl_nents, unsigned int authsize,
+				 u32 last_entry_data_size,
+				 bool *is_icv_fragmented)
 {
 {
 	unsigned int icv_max_size = 0;
 	unsigned int icv_max_size = 0;
-	unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
+	unsigned int icv_required_size = authsize > last_entry_data_size ?
+					(authsize - last_entry_data_size) :
+					authsize;
 	unsigned int nents;
 	unsigned int nents;
 	unsigned int i;
 	unsigned int i;
 
 
@@ -726,10 +689,12 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
 		icv_max_size = sgl->length;
 		icv_max_size = sgl->length;
 
 
 	if (last_entry_data_size > authsize) {
 	if (last_entry_data_size > authsize) {
-		nents = 0; /* ICV attached to data in last entry (not fragmented!) */
+		/* ICV attached to data in last entry (not fragmented!) */
+		nents = 0;
 		*is_icv_fragmented = false;
 		*is_icv_fragmented = false;
 	} else if (last_entry_data_size == authsize) {
 	} else if (last_entry_data_size == authsize) {
-		nents = 1; /* ICV placed in whole last entry (not fragmented!) */
+		/* ICV placed in whole last entry (not fragmented!) */
+		nents = 1;
 		*is_icv_fragmented = false;
 		*is_icv_fragmented = false;
 	} else if (icv_max_size > icv_required_size) {
 	} else if (icv_max_size > icv_required_size) {
 		nents = 1;
 		nents = 1;
@@ -748,25 +713,25 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
 	return nents;
 	return nents;
 }
 }
 
 
-static inline int ssi_buffer_mgr_aead_chain_iv(
-	struct ssi_drvdata *drvdata,
-	struct aead_request *req,
-	struct buffer_array *sg_data,
-	bool is_last, bool do_chain)
+static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
+			    struct aead_request *req,
+			    struct buffer_array *sg_data,
+			    bool is_last, bool do_chain)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
 	int rc = 0;
 	int rc = 0;
 
 
-	if (unlikely(!req->iv)) {
+	if (!req->iv) {
 		areq_ctx->gen_ctx.iv_dma_addr = 0;
 		areq_ctx->gen_ctx.iv_dma_addr = 0;
 		goto chain_iv_exit;
 		goto chain_iv_exit;
 	}
 	}
 
 
-	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
+	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
+						       hw_iv_size,
 						       DMA_BIDIRECTIONAL);
 						       DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
+	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
 			hw_iv_size, req->iv);
 		rc = -ENOMEM;
 		rc = -ENOMEM;
@@ -775,28 +740,27 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
 
 
 	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
-	if (do_chain && areq_ctx->plaintext_authenticate_only) {  // TODO: what about CTR?? ask Ron
+	// TODO: what about CTR?? ask Ron
+	if (do_chain && areq_ctx->plaintext_authenticate_only) {
 		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
 		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
 		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
 		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
 		/* Chain to given list */
 		/* Chain to given list */
-		ssi_buffer_mgr_add_buffer_entry(
-			dev, sg_data,
-			areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
-			iv_size_to_authenc, is_last,
-			&areq_ctx->assoc.mlli_nents);
-		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+		cc_add_buffer_entry(dev, sg_data,
+				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
+				    iv_size_to_authenc, is_last,
+				    &areq_ctx->assoc.mlli_nents);
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 	}
 	}
 
 
 chain_iv_exit:
 chain_iv_exit:
 	return rc;
 	return rc;
 }
 }
 
 
-static inline int ssi_buffer_mgr_aead_chain_assoc(
-	struct ssi_drvdata *drvdata,
-	struct aead_request *req,
-	struct buffer_array *sg_data,
-	bool is_last, bool do_chain)
+static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+			       struct aead_request *req,
+			       struct buffer_array *sg_data,
+			       bool is_last, bool do_chain)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = 0;
 	int rc = 0;
@@ -815,12 +779,12 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 		goto chain_assoc_exit;
 		goto chain_assoc_exit;
 	}
 	}
 
 
-	if (unlikely(req->assoclen == 0)) {
-		areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
+	if (req->assoclen == 0) {
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 		areq_ctx->assoc.nents = 0;
 		areq_ctx->assoc.nents = 0;
 		areq_ctx->assoc.mlli_nents = 0;
 		areq_ctx->assoc.mlli_nents = 0;
 		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
-			GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 			areq_ctx->assoc.nents);
 			areq_ctx->assoc.nents);
 		goto chain_assoc_exit;
 		goto chain_assoc_exit;
 	}
 	}
@@ -828,12 +792,15 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 	//iterate over the sgl to see how many entries are for associated data
 	//iterate over the sgl to see how many entries are for associated data
 	//it is assumed that if we reach here , the sgl is already mapped
 	//it is assumed that if we reach here , the sgl is already mapped
 	sg_index = current_sg->length;
 	sg_index = current_sg->length;
-	if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
+	//the first entry in the scatter list contains all the associated data
+	if (sg_index > size_of_assoc) {
 		mapped_nents++;
 		mapped_nents++;
 	} else {
 	} else {
 		while (sg_index <= size_of_assoc) {
 		while (sg_index <= size_of_assoc) {
 			current_sg = sg_next(current_sg);
 			current_sg = sg_next(current_sg);
-			//if have reached the end of the sgl, then this is unexpected
+			/* if have reached the end of the sgl, then this is
+			 * unexpected
+			 */
 			if (!current_sg) {
 			if (!current_sg) {
 				dev_err(dev, "reached end of sg list. unexpected\n");
 				dev_err(dev, "reached end of sg list. unexpected\n");
 				return -EINVAL;
 				return -EINVAL;
@@ -842,7 +809,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 			mapped_nents++;
 			mapped_nents++;
 		}
 		}
 	}
 	}
-	if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 		return -ENOMEM;
 		return -ENOMEM;
@@ -853,8 +820,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 	 * ccm header configurations
 	 * ccm header configurations
 	 */
 	 */
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
-		if (unlikely((mapped_nents + 1) >
-			LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
 			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
 				(areq_ctx->assoc.nents + 1),
 				(areq_ctx->assoc.nents + 1),
 				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -863,227 +829,175 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 		}
 		}
 	}
 	}
 
 
-	if (likely(mapped_nents == 1) &&
-	    (areq_ctx->ccm_hdr_size == ccm_header_size_null))
-		areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
+	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 	else
 	else
-		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 
 
-	if (unlikely((do_chain) ||
-		     (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
+	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
-			GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 			areq_ctx->assoc.nents);
 			areq_ctx->assoc.nents);
-		ssi_buffer_mgr_add_scatterlist_entry(
-			dev, sg_data, areq_ctx->assoc.nents,
-			req->src, req->assoclen, 0, is_last,
-			&areq_ctx->assoc.mlli_nents);
-		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+				req->assoclen, 0, is_last,
+				&areq_ctx->assoc.mlli_nents);
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 	}
 	}
 
 
 chain_assoc_exit:
 chain_assoc_exit:
 	return rc;
 	return rc;
 }
 }
 
 
-static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
-	struct aead_request *req,
-	u32 *src_last_bytes, u32 *dst_last_bytes)
+static void cc_prepare_aead_data_dlli(struct aead_request *req,
+				      u32 *src_last_bytes, u32 *dst_last_bytes)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
 	unsigned int authsize = areq_ctx->req_authsize;
 
 
 	areq_ctx->is_icv_fragmented = false;
 	areq_ctx->is_icv_fragmented = false;
-	if (likely(req->src == req->dst)) {
+	if (req->src == req->dst) {
 		/*INPLACE*/
 		/*INPLACE*/
-		areq_ctx->icv_dma_addr = sg_dma_address(
-			areq_ctx->src_sgl) +
+		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
 			(*src_last_bytes - authsize);
 			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(
-			areq_ctx->src_sgl) +
+		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
 			(*src_last_bytes - authsize);
 			(*src_last_bytes - authsize);
 	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 		/*NON-INPLACE and DECRYPT*/
 		/*NON-INPLACE and DECRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(
-			areq_ctx->src_sgl) +
+		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
 			(*src_last_bytes - authsize);
 			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(
-			areq_ctx->src_sgl) +
+		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
 			(*src_last_bytes - authsize);
 			(*src_last_bytes - authsize);
 	} else {
 	} else {
 		/*NON-INPLACE and ENCRYPT*/
 		/*NON-INPLACE and ENCRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(
-			areq_ctx->dst_sgl) +
+		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
 			(*dst_last_bytes - authsize);
 			(*dst_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(
-			areq_ctx->dst_sgl) +
+		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
 			(*dst_last_bytes - authsize);
 			(*dst_last_bytes - authsize);
 	}
 	}
 }
 }
 
 
-static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
-	struct ssi_drvdata *drvdata,
-	struct aead_request *req,
-	struct buffer_array *sg_data,
-	u32 *src_last_bytes, u32 *dst_last_bytes,
-	bool is_last_table)
+static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+				     struct aead_request *req,
+				     struct buffer_array *sg_data,
+				     u32 *src_last_bytes, u32 *dst_last_bytes,
+				     bool is_last_table)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
 	unsigned int authsize = areq_ctx->req_authsize;
 	int rc = 0, icv_nents;
 	int rc = 0, icv_nents;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
+	struct scatterlist *sg;
 
 
-	if (likely(req->src == req->dst)) {
+	if (req->src == req->dst) {
 		/*INPLACE*/
 		/*INPLACE*/
-		ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
-						     areq_ctx->src.nents,
-						     areq_ctx->src_sgl,
-						     areq_ctx->cryptlen,
-						     areq_ctx->src_offset,
-						     is_last_table,
-						     &areq_ctx->src.mlli_nents);
-
-		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
-							      areq_ctx->src_sgl,
-							      areq_ctx->src.nents,
-							      authsize,
-							      *src_last_bytes,
-							      &areq_ctx->is_icv_fragmented);
-		if (unlikely(icv_nents < 0)) {
+		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+				areq_ctx->src_sgl, areq_ctx->cryptlen,
+				areq_ctx->src_offset, is_last_table,
+				&areq_ctx->src.mlli_nents);
+
+		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+						  areq_ctx->src.nents,
+						  authsize, *src_last_bytes,
+						  &areq_ctx->is_icv_fragmented);
+		if (icv_nents < 0) {
 			rc = -ENOTSUPP;
 			rc = -ENOTSUPP;
 			goto prepare_data_mlli_exit;
 			goto prepare_data_mlli_exit;
 		}
 		}
 
 
-		if (unlikely(areq_ctx->is_icv_fragmented)) {
+		if (areq_ctx->is_icv_fragmented) {
 			/* Backup happens only when ICV is fragmented, ICV
 			/* Backup happens only when ICV is fragmented, ICV
-			 * verification is made by CPU compare in order to simplify
-			 * MAC verification upon request completion
+			 * verification is made by CPU compare in order to
+			 * simplify MAC verification upon request completion
 			 */
 			 */
 			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-				if (!drvdata->coherent) {
 				/* In coherent platforms (e.g. ACP)
 				/* In coherent platforms (e.g. ACP)
 				 * already copying ICV for any
 				 * already copying ICV for any
 				 * INPLACE-DECRYPT operation, hence
 				 * INPLACE-DECRYPT operation, hence
 				 * we must neglect this code.
 				 * we must neglect this code.
 				 */
 				 */
-					u32 skip = req->assoclen;
-
-					if (areq_ctx->is_gcm4543)
-						skip += crypto_aead_ivsize(tfm);
-
-					ssi_buffer_mgr_copy_scatterlist_portion(
-						dev, areq_ctx->backup_mac,
-						req->src,
-						(skip + req->cryptlen -
-						 areq_ctx->req_authsize),
-						skip + req->cryptlen,
-						SSI_SG_TO_BUF);
-				}
+				if (!drvdata->coherent)
+					cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
 				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 			} else {
 			} else {
 				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
-				areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+				areq_ctx->icv_dma_addr =
+					areq_ctx->mac_buf_dma_addr;
 			}
 			}
 		} else { /* Contig. ICV */
 		} else { /* Contig. ICV */
+			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
 			/*Should hanlde if the sg is not contig.*/
 			/*Should hanlde if the sg is not contig.*/
-			areq_ctx->icv_dma_addr = sg_dma_address(
-				&areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 				(*src_last_bytes - authsize);
 				(*src_last_bytes - authsize);
-			areq_ctx->icv_virt_addr = sg_virt(
-				&areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+			areq_ctx->icv_virt_addr = sg_virt(sg) +
 				(*src_last_bytes - authsize);
 				(*src_last_bytes - authsize);
 		}
 		}
 
 
 	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 		/*NON-INPLACE and DECRYPT*/
 		/*NON-INPLACE and DECRYPT*/
-		ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
-						     areq_ctx->src.nents,
-						     areq_ctx->src_sgl,
-						     areq_ctx->cryptlen,
-						     areq_ctx->src_offset,
-						     is_last_table,
-						     &areq_ctx->src.mlli_nents);
-		ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
-						     areq_ctx->dst.nents,
-						     areq_ctx->dst_sgl,
-						     areq_ctx->cryptlen,
-						     areq_ctx->dst_offset,
-						     is_last_table,
-						     &areq_ctx->dst.mlli_nents);
-
-		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
-							      areq_ctx->src_sgl,
-							      areq_ctx->src.nents,
-							      authsize,
-							      *src_last_bytes,
-							      &areq_ctx->is_icv_fragmented);
-		if (unlikely(icv_nents < 0)) {
+		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+				areq_ctx->src_sgl, areq_ctx->cryptlen,
+				areq_ctx->src_offset, is_last_table,
+				&areq_ctx->src.mlli_nents);
+		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+				areq_ctx->dst_sgl, areq_ctx->cryptlen,
+				areq_ctx->dst_offset, is_last_table,
+				&areq_ctx->dst.mlli_nents);
+
+		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+						  areq_ctx->src.nents,
+						  authsize, *src_last_bytes,
+						  &areq_ctx->is_icv_fragmented);
+		if (icv_nents < 0) {
 			rc = -ENOTSUPP;
 			rc = -ENOTSUPP;
 			goto prepare_data_mlli_exit;
 			goto prepare_data_mlli_exit;
 		}
 		}
 
 
-		if (unlikely(areq_ctx->is_icv_fragmented)) {
-			/* Backup happens only when ICV is fragmented, ICV
-			 * verification is made by CPU compare in order to simplify
-			 * MAC verification upon request completion
-			 */
-			u32 size_to_skip = req->assoclen;
-
-			if (areq_ctx->is_gcm4543)
-				size_to_skip += crypto_aead_ivsize(tfm);
-
-			ssi_buffer_mgr_copy_scatterlist_portion(
-				dev, areq_ctx->backup_mac, req->src,
-				size_to_skip + req->cryptlen - areq_ctx->req_authsize,
-				size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
+		/* Backup happens only when ICV is fragmented, ICV
+		 * verification is made by CPU compare in order to simplify
+		 * MAC verification upon request completion
+		 */
+		if (areq_ctx->is_icv_fragmented) {
+			cc_copy_mac(dev, req, CC_SG_TO_BUF);
 			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+
 		} else { /* Contig. ICV */
 		} else { /* Contig. ICV */
+			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
 			/*Should hanlde if the sg is not contig.*/
 			/*Should hanlde if the sg is not contig.*/
-			areq_ctx->icv_dma_addr = sg_dma_address(
-				&areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 				(*src_last_bytes - authsize);
 				(*src_last_bytes - authsize);
-			areq_ctx->icv_virt_addr = sg_virt(
-				&areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
+			areq_ctx->icv_virt_addr = sg_virt(sg) +
 				(*src_last_bytes - authsize);
 				(*src_last_bytes - authsize);
 		}
 		}
 
 
 	} else {
 	} else {
 		/*NON-INPLACE and ENCRYPT*/
 		/*NON-INPLACE and ENCRYPT*/
-		ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
-						     areq_ctx->dst.nents,
-						     areq_ctx->dst_sgl,
-						     areq_ctx->cryptlen,
-						     areq_ctx->dst_offset,
-						     is_last_table,
-						     &areq_ctx->dst.mlli_nents);
-		ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
-						     areq_ctx->src.nents,
-						     areq_ctx->src_sgl,
-						     areq_ctx->cryptlen,
-						     areq_ctx->src_offset,
-						     is_last_table,
-						     &areq_ctx->src.mlli_nents);
-
-		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
-							      areq_ctx->dst_sgl,
-							      areq_ctx->dst.nents,
-							      authsize,
-							      *dst_last_bytes,
-			&areq_ctx->is_icv_fragmented);
-		if (unlikely(icv_nents < 0)) {
+		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+				areq_ctx->dst_sgl, areq_ctx->cryptlen,
+				areq_ctx->dst_offset, is_last_table,
+				&areq_ctx->dst.mlli_nents);
+		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+				areq_ctx->src_sgl, areq_ctx->cryptlen,
+				areq_ctx->src_offset, is_last_table,
+				&areq_ctx->src.mlli_nents);
+
+		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
+						  areq_ctx->dst.nents,
+						  authsize, *dst_last_bytes,
+						  &areq_ctx->is_icv_fragmented);
+		if (icv_nents < 0) {
 			rc = -ENOTSUPP;
 			rc = -ENOTSUPP;
 			goto prepare_data_mlli_exit;
 			goto prepare_data_mlli_exit;
 		}
 		}
 
 
-		if (likely(!areq_ctx->is_icv_fragmented)) {
+		if (!areq_ctx->is_icv_fragmented) {
+			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 			/* Contig. ICV */
 			/* Contig. ICV */
-			areq_ctx->icv_dma_addr = sg_dma_address(
-				&areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
+			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 				(*dst_last_bytes - authsize);
 				(*dst_last_bytes - authsize);
-			areq_ctx->icv_virt_addr = sg_virt(
-				&areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
+			areq_ctx->icv_virt_addr = sg_virt(sg) +
 				(*dst_last_bytes - authsize);
 				(*dst_last_bytes - authsize);
 		} else {
 		} else {
 			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
 			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
@@ -1095,11 +1009,10 @@ prepare_data_mlli_exit:
 	return rc;
 	return rc;
 }
 }
 
 
-static inline int ssi_buffer_mgr_aead_chain_data(
-	struct ssi_drvdata *drvdata,
-	struct aead_request *req,
-	struct buffer_array *sg_data,
-	bool is_last_table, bool do_chain)
+static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+			      struct aead_request *req,
+			      struct buffer_array *sg_data,
+			      bool is_last_table, bool do_chain)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
@@ -1109,7 +1022,8 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 	int rc = 0;
 	int rc = 0;
 	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 	u32 offset = 0;
 	u32 offset = 0;
-	unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
+	/* non-inplace mode */
+	unsigned int size_for_map = req->assoclen + req->cryptlen;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	u32 sg_index = 0;
 	u32 sg_index = 0;
 	bool chained = false;
 	bool chained = false;
@@ -1130,11 +1044,10 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 	if (is_gcm4543)
 	if (is_gcm4543)
 		size_for_map += crypto_aead_ivsize(tfm);
 		size_for_map += crypto_aead_ivsize(tfm);
 
 
-	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
-	src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->src,
-							size_for_map,
-							&src_last_bytes,
-							&chained);
+	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+			authsize : 0;
+	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
+					    &src_last_bytes, &chained);
 	sg_index = areq_ctx->src_sgl->length;
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
 	//check where the data starts
 	while (sg_index <= size_to_skip) {
 	while (sg_index <= size_to_skip) {
@@ -1148,7 +1061,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 		sg_index += areq_ctx->src_sgl->length;
 		sg_index += areq_ctx->src_sgl->length;
 		src_mapped_nents--;
 		src_mapped_nents--;
 	}
 	}
-	if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 			return -ENOMEM;
 			return -ENOMEM;
@@ -1160,26 +1073,23 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 
 
 	if (req->src != req->dst) {
 	if (req->src != req->dst) {
 		size_for_map = req->assoclen + req->cryptlen;
 		size_for_map = req->assoclen + req->cryptlen;
-		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
+		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+				authsize : 0;
 		if (is_gcm4543)
 		if (is_gcm4543)
 			size_for_map += crypto_aead_ivsize(tfm);
 			size_for_map += crypto_aead_ivsize(tfm);
 
 
-		rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
-						    DMA_BIDIRECTIONAL,
-						    &areq_ctx->dst.nents,
-						    LLI_MAX_NUM_OF_DATA_ENTRIES,
-						    &dst_last_bytes,
-						    &dst_mapped_nents);
-		if (unlikely(rc != 0)) {
+		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+			       &areq_ctx->dst.nents,
+			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+			       &dst_mapped_nents);
+		if (rc) {
 			rc = -ENOMEM;
 			rc = -ENOMEM;
 			goto chain_data_exit;
 			goto chain_data_exit;
 		}
 		}
 	}
 	}
 
 
-	dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
-							size_for_map,
-							&dst_last_bytes,
-							&chained);
+	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+					    &dst_last_bytes, &chained);
 	sg_index = areq_ctx->dst_sgl->length;
 	sg_index = areq_ctx->dst_sgl->length;
 	offset = size_to_skip;
 	offset = size_to_skip;
 
 
@@ -1195,45 +1105,43 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 		sg_index += areq_ctx->dst_sgl->length;
 		sg_index += areq_ctx->dst_sgl->length;
 		dst_mapped_nents--;
 		dst_mapped_nents--;
 	}
 	}
-	if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 		return -ENOMEM;
 		return -ENOMEM;
 	}
 	}
 	areq_ctx->dst.nents = dst_mapped_nents;
 	areq_ctx->dst.nents = dst_mapped_nents;
 	areq_ctx->dst_offset = offset;
 	areq_ctx->dst_offset = offset;
-	if ((src_mapped_nents > 1) ||
-	    (dst_mapped_nents  > 1) ||
+	if (src_mapped_nents > 1 ||
+	    dst_mapped_nents  > 1 ||
 	    do_chain) {
 	    do_chain) {
-		areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
-		rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
-							   sg_data,
-							   &src_last_bytes,
-							   &dst_last_bytes,
-							   is_last_table);
+		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
+		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+					       &src_last_bytes,
+					       &dst_last_bytes, is_last_table);
 	} else {
 	} else {
-		areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
-		ssi_buffer_mgr_prepare_aead_data_dlli(
-				req, &src_last_bytes, &dst_last_bytes);
+		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
+		cc_prepare_aead_data_dlli(req, &src_last_bytes,
+					  &dst_last_bytes);
 	}
 	}
 
 
 chain_data_exit:
 chain_data_exit:
 	return rc;
 	return rc;
 }
 }
 
 
-static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
-						  struct aead_request *req)
+static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
+				      struct aead_request *req)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	u32 curr_mlli_size = 0;
 	u32 curr_mlli_size = 0;
 
 
-	if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
+	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 						LLI_ENTRY_BYTE_SIZE;
 						LLI_ENTRY_BYTE_SIZE;
 	}
 	}
 
 
-	if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
+	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 		/*Inplace case dst nents equal to src nents*/
 		/*Inplace case dst nents equal to src nents*/
 		if (req->src == req->dst) {
 		if (req->src == req->dst) {
 			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
 			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
@@ -1272,8 +1180,7 @@ static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
 	}
 	}
 }
 }
 
 
-int ssi_buffer_mgr_map_aead_request(
-	struct ssi_drvdata *drvdata, struct aead_request *req)
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 {
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
@@ -1284,30 +1191,22 @@ int ssi_buffer_mgr_map_aead_request(
 	int rc = 0;
 	int rc = 0;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	bool is_gcm4543 = areq_ctx->is_gcm4543;
 	bool is_gcm4543 = areq_ctx->is_gcm4543;
-
+	dma_addr_t dma_addr;
 	u32 mapped_nents = 0;
 	u32 mapped_nents = 0;
 	u32 dummy = 0; /*used for the assoc data fragments */
 	u32 dummy = 0; /*used for the assoc data fragments */
 	u32 size_to_map = 0;
 	u32 size_to_map = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
 
 
 	mlli_params->curr_pool = NULL;
 	mlli_params->curr_pool = NULL;
 	sg_data.num_of_buffers = 0;
 	sg_data.num_of_buffers = 0;
 
 
+	/* copy mac to a temporary location to deal with possible
+	 * data memory overriding that caused by cache coherence problem.
+	 */
 	if (drvdata->coherent &&
 	if (drvdata->coherent &&
-	    (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
-	    likely(req->src == req->dst)) {
-		u32 size_to_skip = req->assoclen;
-
-		if (is_gcm4543)
-			size_to_skip += crypto_aead_ivsize(tfm);
-
-		/* copy mac to a temporary location to deal with possible
-		 * data memory overriding that caused by cache coherence problem.
-		 */
-		ssi_buffer_mgr_copy_scatterlist_portion(
-			dev, areq_ctx->backup_mac, req->src,
-			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
-			size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
-	}
+	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+	    req->src == req->dst)
+		cc_copy_mac(dev, req, CC_SG_TO_BUF);
 
 
 	/* cacluate the size for cipher remove ICV in decrypt*/
 	/* cacluate the size for cipher remove ICV in decrypt*/
 	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
 	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
@@ -1315,90 +1214,83 @@ int ssi_buffer_mgr_map_aead_request(
 				req->cryptlen :
 				req->cryptlen :
 				(req->cryptlen - authsize);
 				(req->cryptlen - authsize);
 
 
-	areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf,
-						    MAX_MAC_SIZE,
-						    DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
+	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
+				  DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, dma_addr)) {
 		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 			MAX_MAC_SIZE, areq_ctx->mac_buf);
 			MAX_MAC_SIZE, areq_ctx->mac_buf);
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto aead_map_failure;
 		goto aead_map_failure;
 	}
 	}
+	areq_ctx->mac_buf_dma_addr = dma_addr;
 
 
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
-		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
-							    (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
-							    AES_BLOCK_SIZE,
-							    DMA_TO_DEVICE);
+		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
 
 
-		if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
+		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
+					  DMA_TO_DEVICE);
+
+		if (dma_mapping_error(dev, dma_addr)) {
 			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
 			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
-				AES_BLOCK_SIZE,
-				(areq_ctx->ccm_config +
-				 CCM_CTR_COUNT_0_OFFSET));
+				AES_BLOCK_SIZE, addr);
 			areq_ctx->ccm_iv0_dma_addr = 0;
 			areq_ctx->ccm_iv0_dma_addr = 0;
 			rc = -ENOMEM;
 			rc = -ENOMEM;
 			goto aead_map_failure;
 			goto aead_map_failure;
 		}
 		}
-		if (ssi_aead_handle_config_buf(dev, areq_ctx,
-					       areq_ctx->ccm_config, &sg_data,
-					       req->assoclen) != 0) {
+		areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+					 &sg_data, req->assoclen)) {
 			rc = -ENOMEM;
 			rc = -ENOMEM;
 			goto aead_map_failure;
 			goto aead_map_failure;
 		}
 		}
 	}
 	}
 
 
-#if SSI_CC_HAS_AES_GCM
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
-		areq_ctx->hkey_dma_addr = dma_map_single(dev,
-							 areq_ctx->hkey,
-							 AES_BLOCK_SIZE,
-							 DMA_BIDIRECTIONAL);
-		if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
+		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
+					  DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, dma_addr)) {
 			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
 			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
 				AES_BLOCK_SIZE, areq_ctx->hkey);
 				AES_BLOCK_SIZE, areq_ctx->hkey);
 			rc = -ENOMEM;
 			rc = -ENOMEM;
 			goto aead_map_failure;
 			goto aead_map_failure;
 		}
 		}
+		areq_ctx->hkey_dma_addr = dma_addr;
 
 
-		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
-								  &areq_ctx->gcm_len_block,
-								  AES_BLOCK_SIZE,
-								  DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
+		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
+					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_addr)) {
 			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
 			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
 				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
 				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
 			rc = -ENOMEM;
 			rc = -ENOMEM;
 			goto aead_map_failure;
 			goto aead_map_failure;
 		}
 		}
+		areq_ctx->gcm_block_len_dma_addr = dma_addr;
 
 
-		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
-								areq_ctx->gcm_iv_inc1,
-								AES_BLOCK_SIZE,
-								DMA_TO_DEVICE);
+		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
+					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
 
 
-		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
+		if (dma_mapping_error(dev, dma_addr)) {
 			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
 			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
 				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
 				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
 			areq_ctx->gcm_iv_inc1_dma_addr = 0;
 			areq_ctx->gcm_iv_inc1_dma_addr = 0;
 			rc = -ENOMEM;
 			rc = -ENOMEM;
 			goto aead_map_failure;
 			goto aead_map_failure;
 		}
 		}
+		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
 
 
-		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
-								areq_ctx->gcm_iv_inc2,
-								AES_BLOCK_SIZE,
-								DMA_TO_DEVICE);
+		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
+					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
 
 
-		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
+		if (dma_mapping_error(dev, dma_addr)) {
 			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
 			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
 				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
 				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
 			areq_ctx->gcm_iv_inc2_dma_addr = 0;
 			areq_ctx->gcm_iv_inc2_dma_addr = 0;
 			rc = -ENOMEM;
 			rc = -ENOMEM;
 			goto aead_map_failure;
 			goto aead_map_failure;
 		}
 		}
+		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
 	}
 	}
-#endif /*SSI_CC_HAS_AES_GCM*/
 
 
 	size_to_map = req->cryptlen + req->assoclen;
 	size_to_map = req->cryptlen + req->assoclen;
 	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
 	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
@@ -1406,29 +1298,31 @@ int ssi_buffer_mgr_map_aead_request(
 
 
 	if (is_gcm4543)
 	if (is_gcm4543)
 		size_to_map += crypto_aead_ivsize(tfm);
 		size_to_map += crypto_aead_ivsize(tfm);
-	rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
-					    size_to_map, DMA_BIDIRECTIONAL, &areq_ctx->src.nents,
-					    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
-	if (unlikely(rc != 0)) {
+	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+		       &areq_ctx->src.nents,
+		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+			LLI_MAX_NUM_OF_DATA_ENTRIES),
+		       &dummy, &mapped_nents);
+	if (rc) {
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto aead_map_failure;
 		goto aead_map_failure;
 	}
 	}
 
 
-	if (likely(areq_ctx->is_single_pass)) {
+	if (areq_ctx->is_single_pass) {
 		/*
 		/*
 		 * Create MLLI table for:
 		 * Create MLLI table for:
 		 *   (1) Assoc. data
 		 *   (1) Assoc. data
 		 *   (2) Src/Dst SGLs
 		 *   (2) Src/Dst SGLs
 		 *   Note: IV is contg. buffer (not an SGL)
 		 *   Note: IV is contg. buffer (not an SGL)
 		 */
 		 */
-		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
-		if (unlikely(rc != 0))
+		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+		if (rc)
 			goto aead_map_failure;
 			goto aead_map_failure;
-		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
-		if (unlikely(rc != 0))
+		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
+		if (rc)
 			goto aead_map_failure;
 			goto aead_map_failure;
-		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
-		if (unlikely(rc != 0))
+		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
+		if (rc)
 			goto aead_map_failure;
 			goto aead_map_failure;
 	} else { /* DOUBLE-PASS flow */
 	} else { /* DOUBLE-PASS flow */
 		/*
 		/*
@@ -1451,27 +1345,28 @@ int ssi_buffer_mgr_map_aead_request(
 		 *   (3) MLLI for src
 		 *   (3) MLLI for src
 		 *   (4) MLLI for dst
 		 *   (4) MLLI for dst
 		 */
 		 */
-		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
-		if (unlikely(rc != 0))
+		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+		if (rc)
 			goto aead_map_failure;
 			goto aead_map_failure;
-		rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
-		if (unlikely(rc != 0))
+		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
+		if (rc)
 			goto aead_map_failure;
 			goto aead_map_failure;
-		rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
-		if (unlikely(rc != 0))
+		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
+		if (rc)
 			goto aead_map_failure;
 			goto aead_map_failure;
 	}
 	}
 
 
-	/* Mlli support -start building the MLLI according to the above results */
-	if (unlikely(
-		(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
-		(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
+	/* Mlli support -start building the MLLI according to the above
+	 * results
+	 */
+	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
-		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
-		if (unlikely(rc != 0))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto aead_map_failure;
 			goto aead_map_failure;
 
 
-		ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
+		cc_update_aead_mlli_nents(drvdata, req);
 		dev_dbg(dev, "assoc params mn %d\n",
 		dev_dbg(dev, "assoc params mn %d\n",
 			areq_ctx->assoc.mlli_nents);
 			areq_ctx->assoc.mlli_nents);
 		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
 		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
@@ -1480,19 +1375,18 @@ int ssi_buffer_mgr_map_aead_request(
 	return 0;
 	return 0;
 
 
 aead_map_failure:
 aead_map_failure:
-	ssi_buffer_mgr_unmap_aead_request(dev, req);
+	cc_unmap_aead_request(dev, req);
 	return rc;
 	return rc;
 }
 }
 
 
-int ssi_buffer_mgr_map_hash_request_final(
-	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+			      struct scatterlist *src, unsigned int nbytes,
+			      bool do_update, gfp_t flags)
 {
 {
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
-	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
-			areq_ctx->buff0;
-	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
-			&areq_ctx->buff0_cnt;
+	u8 *curr_buff = cc_hash_buf(areq_ctx);
+	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
@@ -1502,88 +1396,78 @@ int ssi_buffer_mgr_map_hash_request_final(
 	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
 	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
 		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
 		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
 	/* Init the type of the dma buffer */
 	/* Init the type of the dma buffer */
-	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
 	mlli_params->curr_pool = NULL;
 	mlli_params->curr_pool = NULL;
 	sg_data.num_of_buffers = 0;
 	sg_data.num_of_buffers = 0;
 	areq_ctx->in_nents = 0;
 	areq_ctx->in_nents = 0;
 
 
-	if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
+	if (nbytes == 0 && *curr_buff_cnt == 0) {
 		/* nothing to do */
 		/* nothing to do */
 		return 0;
 		return 0;
 	}
 	}
 
 
 	/*TODO: copy data in case that buffer is enough for operation */
 	/*TODO: copy data in case that buffer is enough for operation */
 	/* map the previous buffer */
 	/* map the previous buffer */
-	if (*curr_buff_cnt != 0) {
-		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
-					      *curr_buff_cnt, &sg_data) != 0) {
+	if (*curr_buff_cnt) {
+		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				    &sg_data)) {
 			return -ENOMEM;
 			return -ENOMEM;
 		}
 		}
 	}
 	}
 
 
-	if (src && (nbytes > 0) && do_update) {
-		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
-							    DMA_TO_DEVICE,
-							    &areq_ctx->in_nents,
-							    LLI_MAX_NUM_OF_DATA_ENTRIES,
-							    &dummy,
-							    &mapped_nents))){
+	if (src && nbytes > 0 && do_update) {
+		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			      &dummy, &mapped_nents)) {
 			goto unmap_curr_buff;
 			goto unmap_curr_buff;
 		}
 		}
-		if (src && (mapped_nents == 1)
-		     && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+		if (src && mapped_nents == 1 &&
+		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			memcpy(areq_ctx->buff_sg, src,
 			memcpy(areq_ctx->buff_sg, src,
 			       sizeof(struct scatterlist));
 			       sizeof(struct scatterlist));
 			areq_ctx->buff_sg->length = nbytes;
 			areq_ctx->buff_sg->length = nbytes;
 			areq_ctx->curr_sg = areq_ctx->buff_sg;
 			areq_ctx->curr_sg = areq_ctx->buff_sg;
-			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 		} else {
 		} else {
-			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
 		}
 		}
 	}
 	}
 
 
 	/*build mlli */
 	/*build mlli */
-	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 		/* add the src data to the sg_data */
 		/* add the src data to the sg_data */
-		ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
-						     areq_ctx->in_nents,
-						     src, nbytes, 0, true,
-						     &areq_ctx->mlli_nents);
-		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
-							  mlli_params) != 0)) {
+		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+				0, true, &areq_ctx->mlli_nents);
+		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
 			goto fail_unmap_din;
 			goto fail_unmap_din;
-		}
 	}
 	}
 	/* change the buffer index for the unmap function */
 	/* change the buffer index for the unmap function */
 	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
 	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
 	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
 	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
-		GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
 	return 0;
 	return 0;
 
 
 fail_unmap_din:
 fail_unmap_din:
 	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
 	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
 
 
 unmap_curr_buff:
 unmap_curr_buff:
-	if (*curr_buff_cnt != 0)
+	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 
 
 	return -ENOMEM;
 	return -ENOMEM;
 }
 }
 
 
-int ssi_buffer_mgr_map_hash_request_update(
-	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+			       struct scatterlist *src, unsigned int nbytes,
+			       unsigned int block_size, gfp_t flags)
 {
 {
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
-	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
-			areq_ctx->buff0;
-	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
-			&areq_ctx->buff0_cnt;
-	u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
-			areq_ctx->buff1;
-	u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
-			&areq_ctx->buff1_cnt;
+	u8 *curr_buff = cc_hash_buf(areq_ctx);
+	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+	u8 *next_buff = cc_next_buf(areq_ctx);
+	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	unsigned int update_data_len;
 	unsigned int update_data_len;
 	u32 total_in_len = nbytes + *curr_buff_cnt;
 	u32 total_in_len = nbytes + *curr_buff_cnt;
@@ -1596,18 +1480,17 @@ int ssi_buffer_mgr_map_hash_request_update(
 	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
 	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
 		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
 		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
 	/* Init the type of the dma buffer */
 	/* Init the type of the dma buffer */
-	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
 	mlli_params->curr_pool = NULL;
 	mlli_params->curr_pool = NULL;
 	areq_ctx->curr_sg = NULL;
 	areq_ctx->curr_sg = NULL;
 	sg_data.num_of_buffers = 0;
 	sg_data.num_of_buffers = 0;
 	areq_ctx->in_nents = 0;
 	areq_ctx->in_nents = 0;
 
 
-	if (unlikely(total_in_len < block_size)) {
+	if (total_in_len < block_size) {
 		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
 		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
 		areq_ctx->in_nents =
 		areq_ctx->in_nents =
-			ssi_buffer_mgr_get_sgl_nents(dev, src, nbytes, &dummy,
-						     NULL);
+			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 				  &curr_buff[*curr_buff_cnt], nbytes);
 				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;
 		*curr_buff_cnt += nbytes;
@@ -1623,20 +1506,20 @@ int ssi_buffer_mgr_map_hash_request_update(
 		*next_buff_cnt, update_data_len);
 		*next_buff_cnt, update_data_len);
 
 
 	/* Copy the new residue to next buffer */
 	/* Copy the new residue to next buffer */
-	if (*next_buff_cnt != 0) {
+	if (*next_buff_cnt) {
 		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
 		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
 			next_buff, (update_data_len - *curr_buff_cnt),
 			next_buff, (update_data_len - *curr_buff_cnt),
 			*next_buff_cnt);
 			*next_buff_cnt);
-		ssi_buffer_mgr_copy_scatterlist_portion(dev, next_buff, src,
-							(update_data_len - *curr_buff_cnt),
-							nbytes, SSI_SG_TO_BUF);
+		cc_copy_sg_portion(dev, next_buff, src,
+				   (update_data_len - *curr_buff_cnt),
+				   nbytes, CC_SG_TO_BUF);
 		/* change the buffer index for next operation */
 		/* change the buffer index for next operation */
 		swap_index = 1;
 		swap_index = 1;
 	}
 	}
 
 
-	if (*curr_buff_cnt != 0) {
-		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
-					      *curr_buff_cnt, &sg_data) != 0) {
+	if (*curr_buff_cnt) {
+		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				    &sg_data)) {
 			return -ENOMEM;
 			return -ENOMEM;
 		}
 		}
 		/* change the buffer index for next operation */
 		/* change the buffer index for next operation */
@@ -1644,42 +1527,33 @@ int ssi_buffer_mgr_map_hash_request_update(
 	}
 	}
 
 
 	if (update_data_len > *curr_buff_cnt) {
 	if (update_data_len > *curr_buff_cnt) {
-		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
-							    (update_data_len - *curr_buff_cnt),
-							    DMA_TO_DEVICE,
-							    &areq_ctx->in_nents,
-							    LLI_MAX_NUM_OF_DATA_ENTRIES,
-							    &dummy,
-							    &mapped_nents))){
+		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+			      DMA_TO_DEVICE, &areq_ctx->in_nents,
+			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+			      &mapped_nents)) {
 			goto unmap_curr_buff;
 			goto unmap_curr_buff;
 		}
 		}
-		if ((mapped_nents == 1)
-		     && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+		if (mapped_nents == 1 &&
+		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			/* only one entry in the SG and no previous data */
 			/* only one entry in the SG and no previous data */
 			memcpy(areq_ctx->buff_sg, src,
 			memcpy(areq_ctx->buff_sg, src,
 			       sizeof(struct scatterlist));
 			       sizeof(struct scatterlist));
 			areq_ctx->buff_sg->length = update_data_len;
 			areq_ctx->buff_sg->length = update_data_len;
-			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 			areq_ctx->curr_sg = areq_ctx->buff_sg;
 			areq_ctx->curr_sg = areq_ctx->buff_sg;
 		} else {
 		} else {
-			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
 		}
 		}
 	}
 	}
 
 
-	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 		/* add the src data to the sg_data */
 		/* add the src data to the sg_data */
-		ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
-						     areq_ctx->in_nents,
-						     src,
-						     (update_data_len - *curr_buff_cnt),
-						     0,
-						     true,
-						     &areq_ctx->mlli_nents);
-		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
-							  mlli_params) != 0)) {
+		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+				(update_data_len - *curr_buff_cnt), 0, true,
+				&areq_ctx->mlli_nents);
+		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
 			goto fail_unmap_din;
 			goto fail_unmap_din;
-		}
 	}
 	}
 	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
 	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
 
 
@@ -1689,18 +1563,17 @@ fail_unmap_din:
 	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
 	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
 
 
 unmap_curr_buff:
 unmap_curr_buff:
-	if (*curr_buff_cnt != 0)
+	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 
 
 	return -ENOMEM;
 	return -ENOMEM;
 }
 }
 
 
-void ssi_buffer_mgr_unmap_hash_request(
-	struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+			   struct scatterlist *src, bool do_revert)
 {
 {
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
-	u32 *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
-						&areq_ctx->buff1_cnt;
+	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
 
 
 	/*In case a pool was set, a table was
 	/*In case a pool was set, a table was
 	 *allocated and should be released
 	 *allocated and should be released
@@ -1714,21 +1587,23 @@ void ssi_buffer_mgr_unmap_hash_request(
 			      areq_ctx->mlli_params.mlli_dma_addr);
 			      areq_ctx->mlli_params.mlli_dma_addr);
 	}
 	}
 
 
-	if ((src) && likely(areq_ctx->in_nents != 0)) {
+	if (src && areq_ctx->in_nents) {
 		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
 		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
 			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
 			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
 		dma_unmap_sg(dev, src,
 		dma_unmap_sg(dev, src,
 			     areq_ctx->in_nents, DMA_TO_DEVICE);
 			     areq_ctx->in_nents, DMA_TO_DEVICE);
 	}
 	}
 
 
-	if (*prev_len != 0) {
+	if (*prev_len) {
 		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
 		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
 			sg_virt(areq_ctx->buff_sg),
 			sg_virt(areq_ctx->buff_sg),
 			&sg_dma_address(areq_ctx->buff_sg),
 			&sg_dma_address(areq_ctx->buff_sg),
 			sg_dma_len(areq_ctx->buff_sg));
 			sg_dma_len(areq_ctx->buff_sg));
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 		if (!do_revert) {
 		if (!do_revert) {
-			/* clean the previous data length for update operation */
+			/* clean the previous data length for update
+			 * operation
+			 */
 			*prev_len = 0;
 			*prev_len = 0;
 		} else {
 		} else {
 			areq_ctx->buff_index ^= 1;
 			areq_ctx->buff_index ^= 1;
@@ -1736,7 +1611,7 @@ void ssi_buffer_mgr_unmap_hash_request(
 	}
 	}
 }
 }
 
 
-int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
 {
 	struct buff_mgr_handle *buff_mgr_handle;
 	struct buff_mgr_handle *buff_mgr_handle;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
@@ -1747,23 +1622,23 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
 
 
 	drvdata->buff_mgr_handle = buff_mgr_handle;
 	drvdata->buff_mgr_handle = buff_mgr_handle;
 
 
-	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
-				"dx_single_mlli_tables", dev,
+	buff_mgr_handle->mlli_buffs_pool =
+		dma_pool_create("dx_single_mlli_tables", dev,
 				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
 				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
 				LLI_ENTRY_BYTE_SIZE,
 				LLI_ENTRY_BYTE_SIZE,
 				MLLI_TABLE_MIN_ALIGNMENT, 0);
 				MLLI_TABLE_MIN_ALIGNMENT, 0);
 
 
-	if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
+	if (!buff_mgr_handle->mlli_buffs_pool)
 		goto error;
 		goto error;
 
 
 	return 0;
 	return 0;
 
 
 error:
 error:
-	ssi_buffer_mgr_fini(drvdata);
+	cc_buffer_mgr_fini(drvdata);
 	return -ENOMEM;
 	return -ENOMEM;
 }
 }
 
 
-int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
 {
 {
 	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
 	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
 
 

+ 74 - 0
drivers/staging/ccree/cc_buffer_mgr.h

@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_buffer_mgr.h
+ * Buffer Manager
+ */
+
+#ifndef __CC_BUFFER_MGR_H__
+#define __CC_BUFFER_MGR_H__
+
+#include <crypto/algapi.h>
+
+#include "cc_driver.h"
+
+enum cc_req_dma_buf_type {
+	CC_DMA_BUF_NULL = 0,
+	CC_DMA_BUF_DLLI,
+	CC_DMA_BUF_MLLI
+};
+
+enum cc_sg_cpy_direct {
+	CC_SG_TO_BUF = 0,
+	CC_SG_FROM_BUF = 1
+};
+
+struct cc_mlli {
+	cc_sram_addr_t sram_addr;
+	unsigned int nents; //sg nents
+	unsigned int mlli_nents; //mlli nents might be different than the above
+};
+
+struct mlli_params {
+	struct dma_pool *curr_pool;
+	u8 *mlli_virt_addr;
+	dma_addr_t mlli_dma_addr;
+	u32 mlli_len;
+};
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
+
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+			     unsigned int ivsize, unsigned int nbytes,
+			     void *info, struct scatterlist *src,
+			     struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+				unsigned int ivsize,
+				struct scatterlist *src,
+				struct scatterlist *dst);
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+			      struct scatterlist *src, unsigned int nbytes,
+			      bool do_update, gfp_t flags);
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+			       struct scatterlist *src, unsigned int nbytes,
+			       unsigned int block_size, gfp_t flags);
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+			   struct scatterlist *src, bool do_revert);
+
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
+
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
+
+#endif /*__BUFFER_MGR_H__*/
+

+ 261 - 454
drivers/staging/ccree/ssi_cipher.c → drivers/staging/ccree/cc_cipher.c

@@ -1,46 +1,27 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/semaphore.h>
 #include <crypto/algapi.h>
 #include <crypto/algapi.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/skcipher.h>
-#include <crypto/aes.h>
-#include <crypto/ctr.h>
 #include <crypto/des.h>
 #include <crypto/des.h>
 #include <crypto/xts.h>
 #include <crypto/xts.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/scatterwalk.h>
 
 
-#include "ssi_config.h"
-#include "ssi_driver.h"
+#include "cc_driver.h"
 #include "cc_lli_defs.h"
 #include "cc_lli_defs.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_cipher.h"
-#include "ssi_request_mgr.h"
-#include "ssi_sysfs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
 
 
 #define MAX_ABLKCIPHER_SEQ_LEN 6
 #define MAX_ABLKCIPHER_SEQ_LEN 6
 
 
 #define template_ablkcipher	template_u.ablkcipher
 #define template_ablkcipher	template_u.ablkcipher
 
 
-#define SSI_MIN_AES_XTS_SIZE 0x10
-#define SSI_MAX_AES_XTS_SIZE 0x2000
-struct ssi_blkcipher_handle {
+#define CC_MIN_AES_XTS_SIZE 0x10
+#define CC_MAX_AES_XTS_SIZE 0x2000
+struct cc_cipher_handle {
 	struct list_head blkcipher_alg_list;
 	struct list_head blkcipher_alg_list;
 };
 };
 
 
@@ -54,8 +35,8 @@ struct cc_hw_key_info {
 	enum cc_hw_crypto_key key2_slot;
 	enum cc_hw_crypto_key key2_slot;
 };
 };
 
 
-struct ssi_ablkcipher_ctx {
-	struct ssi_drvdata *drvdata;
+struct cc_cipher_ctx {
+	struct cc_drvdata *drvdata;
 	int keylen;
 	int keylen;
 	int key_round_number;
 	int key_round_number;
 	int cipher_mode;
 	int cipher_mode;
@@ -67,61 +48,56 @@ struct ssi_ablkcipher_ctx {
 	struct crypto_shash *shash_tfm;
 	struct crypto_shash *shash_tfm;
 };
 };
 
 
-static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
 
 
-static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
 {
 {
 	switch (ctx_p->flow_mode) {
 	switch (ctx_p->flow_mode) {
 	case S_DIN_to_AES:
 	case S_DIN_to_AES:
 		switch (size) {
 		switch (size) {
 		case CC_AES_128_BIT_KEY_SIZE:
 		case CC_AES_128_BIT_KEY_SIZE:
 		case CC_AES_192_BIT_KEY_SIZE:
 		case CC_AES_192_BIT_KEY_SIZE:
-			if (likely((ctx_p->cipher_mode != DRV_CIPHER_XTS) &&
-				   (ctx_p->cipher_mode != DRV_CIPHER_ESSIV) &&
-				   (ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)))
+			if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+			    ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+			    ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
 				return 0;
 				return 0;
 			break;
 			break;
 		case CC_AES_256_BIT_KEY_SIZE:
 		case CC_AES_256_BIT_KEY_SIZE:
 			return 0;
 			return 0;
 		case (CC_AES_192_BIT_KEY_SIZE * 2):
 		case (CC_AES_192_BIT_KEY_SIZE * 2):
 		case (CC_AES_256_BIT_KEY_SIZE * 2):
 		case (CC_AES_256_BIT_KEY_SIZE * 2):
-			if (likely((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
-				   (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
-				   (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)))
+			if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+			    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+			    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
 				return 0;
 				return 0;
 			break;
 			break;
 		default:
 		default:
 			break;
 			break;
 		}
 		}
 	case S_DIN_to_DES:
 	case S_DIN_to_DES:
-		if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE))
+		if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
 			return 0;
 			return 0;
 		break;
 		break;
-#if SSI_CC_HAS_MULTI2
-	case S_DIN_to_MULTI2:
-		if (likely(size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE))
-			return 0;
-		break;
-#endif
 	default:
 	default:
 		break;
 		break;
 	}
 	}
 	return -EINVAL;
 	return -EINVAL;
 }
 }
 
 
-static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size)
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+			      unsigned int size)
 {
 {
 	switch (ctx_p->flow_mode) {
 	switch (ctx_p->flow_mode) {
 	case S_DIN_to_AES:
 	case S_DIN_to_AES:
 		switch (ctx_p->cipher_mode) {
 		switch (ctx_p->cipher_mode) {
 		case DRV_CIPHER_XTS:
 		case DRV_CIPHER_XTS:
-			if ((size >= SSI_MIN_AES_XTS_SIZE) &&
-			    (size <= SSI_MAX_AES_XTS_SIZE) &&
+			if (size >= CC_MIN_AES_XTS_SIZE &&
+			    size <= CC_MAX_AES_XTS_SIZE &&
 			    IS_ALIGNED(size, AES_BLOCK_SIZE))
 			    IS_ALIGNED(size, AES_BLOCK_SIZE))
 				return 0;
 				return 0;
 			break;
 			break;
 		case DRV_CIPHER_CBC_CTS:
 		case DRV_CIPHER_CBC_CTS:
-			if (likely(size >= AES_BLOCK_SIZE))
+			if (size >= AES_BLOCK_SIZE)
 				return 0;
 				return 0;
 			break;
 			break;
 		case DRV_CIPHER_OFB:
 		case DRV_CIPHER_OFB:
@@ -131,7 +107,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
 		case DRV_CIPHER_CBC:
 		case DRV_CIPHER_CBC:
 		case DRV_CIPHER_ESSIV:
 		case DRV_CIPHER_ESSIV:
 		case DRV_CIPHER_BITLOCKER:
 		case DRV_CIPHER_BITLOCKER:
-			if (likely(IS_ALIGNED(size, AES_BLOCK_SIZE)))
+			if (IS_ALIGNED(size, AES_BLOCK_SIZE))
 				return 0;
 				return 0;
 			break;
 			break;
 		default:
 		default:
@@ -139,23 +115,9 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
 		}
 		}
 		break;
 		break;
 	case S_DIN_to_DES:
 	case S_DIN_to_DES:
-		if (likely(IS_ALIGNED(size, DES_BLOCK_SIZE)))
-				return 0;
-		break;
-#if SSI_CC_HAS_MULTI2
-	case S_DIN_to_MULTI2:
-		switch (ctx_p->cipher_mode) {
-		case DRV_MULTI2_CBC:
-			if (likely(IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE)))
-				return 0;
-			break;
-		case DRV_MULTI2_OFB:
+		if (IS_ALIGNED(size, DES_BLOCK_SIZE))
 			return 0;
 			return 0;
-		default:
-			break;
-		}
 		break;
 		break;
-#endif /*SSI_CC_HAS_MULTI2*/
 	default:
 	default:
 		break;
 		break;
 	}
 	}
@@ -164,36 +126,42 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
 
 
 static unsigned int get_max_keysize(struct crypto_tfm *tfm)
 static unsigned int get_max_keysize(struct crypto_tfm *tfm)
 {
 {
-	struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg, crypto_alg);
 
 
-	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER)
-		return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_ABLKCIPHER)
+		return cc_alg->crypto_alg.cra_ablkcipher.max_keysize;
 
 
-	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER)
-		return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return cc_alg->crypto_alg.cra_blkcipher.max_keysize;
 
 
 	return 0;
 	return 0;
 }
 }
 
 
-static int ssi_blkcipher_init(struct crypto_tfm *tfm)
+static int cc_cipher_init(struct crypto_tfm *tfm)
 {
 {
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct crypto_alg *alg = tfm->__crt_alg;
 	struct crypto_alg *alg = tfm->__crt_alg;
-	struct ssi_crypto_alg *ssi_alg =
-			container_of(alg, struct ssi_crypto_alg, crypto_alg);
-	struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+	struct cc_crypto_alg *cc_alg =
+			container_of(alg, struct cc_crypto_alg, crypto_alg);
+	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
 	int rc = 0;
 	int rc = 0;
 	unsigned int max_key_buf_size = get_max_keysize(tfm);
 	unsigned int max_key_buf_size = get_max_keysize(tfm);
+	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
 
 
 	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
 	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
 		crypto_tfm_alg_name(tfm));
 		crypto_tfm_alg_name(tfm));
 
 
-	ctx_p->cipher_mode = ssi_alg->cipher_mode;
-	ctx_p->flow_mode = ssi_alg->flow_mode;
-	ctx_p->drvdata = ssi_alg->drvdata;
+	ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
+
+	ctx_p->cipher_mode = cc_alg->cipher_mode;
+	ctx_p->flow_mode = cc_alg->flow_mode;
+	ctx_p->drvdata = cc_alg->drvdata;
 
 
 	/* Allocate key buffer, cache line aligned */
 	/* Allocate key buffer, cache line aligned */
-	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
+	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
 	if (!ctx_p->user.key)
 	if (!ctx_p->user.key)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
@@ -224,9 +192,9 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
 	return rc;
 	return rc;
 }
 }
 
 
-static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
+static void cc_cipher_exit(struct crypto_tfm *tfm)
 {
 {
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	unsigned int max_key_buf_size = get_max_keysize(tfm);
 	unsigned int max_key_buf_size = get_max_keysize(tfm);
 
 
@@ -262,13 +230,15 @@ static const u8 zero_buff[] = {	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
 				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
 				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
 
 
 /* The function verifies that tdes keys are not weak.*/
 /* The function verifies that tdes keys are not weak.*/
-static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen)
+static int cc_verify_3des_keys(const u8 *key, unsigned int keylen)
 {
 {
 	struct tdes_keys *tdes_key = (struct tdes_keys *)key;
 	struct tdes_keys *tdes_key = (struct tdes_keys *)key;
 
 
 	/* verify key1 != key2 and key3 != key2*/
 	/* verify key1 != key2 and key3 != key2*/
-	if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
-		     (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
+	if ((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
+		    sizeof(tdes_key->key1)) == 0) ||
+	    (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
+		    sizeof(tdes_key->key3)) == 0)) {
 		return -ENOEXEC;
 		return -ENOEXEC;
 	}
 	}
 
 
@@ -290,11 +260,11 @@ static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
 	return END_OF_KEYS;
 	return END_OF_KEYS;
 }
 }
 
 
-static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
-				const u8 *key,
-				unsigned int keylen)
+static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
+			    unsigned int keylen)
 {
 {
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	u32 tmp[DES_EXPKEY_WORDS];
 	u32 tmp[DES_EXPKEY_WORDS];
 	unsigned int max_key_buf_size = get_max_keysize(tfm);
 	unsigned int max_key_buf_size = get_max_keysize(tfm);
@@ -305,44 +275,39 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 
 
 	/* STAT_PHASE_0: Init and sanity checks */
 	/* STAT_PHASE_0: Init and sanity checks */
 
 
-#if SSI_CC_HAS_MULTI2
-	/*last byte of key buffer is round number and should not be a part of key size*/
-	if (ctx_p->flow_mode == S_DIN_to_MULTI2)
-		keylen -= 1;
-#endif /*SSI_CC_HAS_MULTI2*/
-
-	if (unlikely(validate_keys_sizes(ctx_p, keylen) != 0)) {
+	if (validate_keys_sizes(ctx_p, keylen)) {
 		dev_err(dev, "Unsupported key size %d.\n", keylen);
 		dev_err(dev, "Unsupported key size %d.\n", keylen);
 		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	if (ssi_is_hw_key(tfm)) {
+	if (cc_is_hw_key(tfm)) {
 		/* setting HW key slots */
 		/* setting HW key slots */
 		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
 		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
 
 
-		if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
+		if (ctx_p->flow_mode != S_DIN_to_AES) {
 			dev_err(dev, "HW key not supported for non-AES flows\n");
 			dev_err(dev, "HW key not supported for non-AES flows\n");
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
 
 
 		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
 		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
-		if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
+		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
 			dev_err(dev, "Unsupported hw key1 number (%d)\n",
 			dev_err(dev, "Unsupported hw key1 number (%d)\n",
 				hki->hw_key1);
 				hki->hw_key1);
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
 
 
-		if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
-		    (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
-		    (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)) {
-			if (unlikely(hki->hw_key1 == hki->hw_key2)) {
+		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+			if (hki->hw_key1 == hki->hw_key2) {
 				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
 				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
 					hki->hw_key1, hki->hw_key2);
 					hki->hw_key1, hki->hw_key2);
 				return -EINVAL;
 				return -EINVAL;
 			}
 			}
-			ctx_p->hw.key2_slot = hw_key_to_cc_hw_key(hki->hw_key2);
-			if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
+			ctx_p->hw.key2_slot =
+				hw_key_to_cc_hw_key(hki->hw_key2);
+			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
 				dev_err(dev, "Unsupported hw key2 number (%d)\n",
 				dev_err(dev, "Unsupported hw key2 number (%d)\n",
 					hki->hw_key2);
 					hki->hw_key2);
 				return -EINVAL;
 				return -EINVAL;
@@ -350,28 +315,28 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 		}
 		}
 
 
 		ctx_p->keylen = keylen;
 		ctx_p->keylen = keylen;
-		dev_dbg(dev, "ssi_is_hw_key ret 0");
+		dev_dbg(dev, "cc_is_hw_key ret 0");
 
 
 		return 0;
 		return 0;
 	}
 	}
 
 
 	// verify weak keys
 	// verify weak keys
 	if (ctx_p->flow_mode == S_DIN_to_DES) {
 	if (ctx_p->flow_mode == S_DIN_to_DES) {
-		if (unlikely(!des_ekey(tmp, key)) &&
+		if (!des_ekey(tmp, key) &&
 		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
 		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
 			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 			dev_dbg(dev, "weak DES key");
 			dev_dbg(dev, "weak DES key");
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
 	}
 	}
-	if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
-	    xts_check_key(tfm, key, keylen) != 0) {
+	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+	    xts_check_key(tfm, key, keylen)) {
 		dev_dbg(dev, "weak XTS key");
 		dev_dbg(dev, "weak XTS key");
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
-	if ((ctx_p->flow_mode == S_DIN_to_DES) &&
-	    (keylen == DES3_EDE_KEY_SIZE) &&
-	    ssi_verify_3des_keys(key, keylen) != 0) {
+	if (ctx_p->flow_mode == S_DIN_to_DES &&
+	    keylen == DES3_EDE_KEY_SIZE &&
+	    cc_verify_3des_keys(key, keylen)) {
 		dev_dbg(dev, "weak 3DES key");
 		dev_dbg(dev, "weak 3DES key");
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
@@ -380,34 +345,24 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
 	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
 				max_key_buf_size, DMA_TO_DEVICE);
 				max_key_buf_size, DMA_TO_DEVICE);
 
 
-	if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
-#if SSI_CC_HAS_MULTI2
-		memcpy(ctx_p->user.key, key, CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE);
-		ctx_p->key_round_number = key[CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE];
-		if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
-		    ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) {
-			crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-			dev_dbg(dev, "SSI_CC_HAS_MULTI2 einval");
-			return -EINVAL;
-#endif /*SSI_CC_HAS_MULTI2*/
-	} else {
-		memcpy(ctx_p->user.key, key, keylen);
-		if (keylen == 24)
-			memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
-
-		if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
-			/* sha256 for key2 - use sw implementation */
-			int key_len = keylen >> 1;
-			int err;
-			SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
-
-			desc->tfm = ctx_p->shash_tfm;
-
-			err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
-			if (err) {
-				dev_err(dev, "Failed to hash ESSIV key.\n");
-				return err;
-			}
+	memcpy(ctx_p->user.key, key, keylen);
+	if (keylen == 24)
+		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* sha256 for key2 - use sw implementation */
+		int key_len = keylen >> 1;
+		int err;
+
+		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+		desc->tfm = ctx_p->shash_tfm;
+
+		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+					  ctx_p->user.key + key_len);
+		if (err) {
+			dev_err(dev, "Failed to hash ESSIV key.\n");
+			return err;
 		}
 		}
 	}
 	}
 	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
 	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
@@ -418,16 +373,13 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 	return 0;
 	return 0;
 }
 }
 
 
-static inline void
-ssi_blkcipher_create_setup_desc(
-	struct crypto_tfm *tfm,
-	struct blkcipher_req_ctx *req_ctx,
-	unsigned int ivsize,
-	unsigned int nbytes,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+				 struct blkcipher_req_ctx *req_ctx,
+				 unsigned int ivsize, unsigned int nbytes,
+				 struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
 {
 {
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	int cipher_mode = ctx_p->cipher_mode;
 	int cipher_mode = ctx_p->cipher_mode;
 	int flow_mode = ctx_p->flow_mode;
 	int flow_mode = ctx_p->flow_mode;
@@ -437,11 +389,14 @@ ssi_blkcipher_create_setup_desc(
 	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
 	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
 	unsigned int du_size = nbytes;
 	unsigned int du_size = nbytes;
 
 
-	struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg, crypto_alg);
 
 
-	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_512)
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+	    CRYPTO_ALG_BULK_DU_512)
 		du_size = 512;
 		du_size = 512;
-	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_4096)
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+	    CRYPTO_ALG_BULK_DU_4096)
 		du_size = 4096;
 		du_size = 4096;
 
 
 	switch (cipher_mode) {
 	switch (cipher_mode) {
@@ -456,8 +411,8 @@ ssi_blkcipher_create_setup_desc(
 		set_cipher_config0(&desc[*seq_size], direction);
 		set_cipher_config0(&desc[*seq_size], direction);
 		set_flow_mode(&desc[*seq_size], flow_mode);
 		set_flow_mode(&desc[*seq_size], flow_mode);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
-		if ((cipher_mode == DRV_CIPHER_CTR) ||
-		    (cipher_mode == DRV_CIPHER_OFB)) {
+		if (cipher_mode == DRV_CIPHER_CTR ||
+		    cipher_mode == DRV_CIPHER_OFB) {
 			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
 			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
 		} else {
 		} else {
 			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
 			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
@@ -470,7 +425,7 @@ ssi_blkcipher_create_setup_desc(
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_config0(&desc[*seq_size], direction);
 		set_cipher_config0(&desc[*seq_size], direction);
 		if (flow_mode == S_DIN_to_AES) {
 		if (flow_mode == S_DIN_to_AES) {
-			if (ssi_is_hw_key(tfm)) {
+			if (cc_is_hw_key(tfm)) {
 				set_hw_crypto_key(&desc[*seq_size],
 				set_hw_crypto_key(&desc[*seq_size],
 						  ctx_p->hw.key1_slot);
 						  ctx_p->hw.key1_slot);
 			} else {
 			} else {
@@ -497,7 +452,7 @@ ssi_blkcipher_create_setup_desc(
 		hw_desc_init(&desc[*seq_size]);
 		hw_desc_init(&desc[*seq_size]);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_config0(&desc[*seq_size], direction);
 		set_cipher_config0(&desc[*seq_size], direction);
-		if (ssi_is_hw_key(tfm)) {
+		if (cc_is_hw_key(tfm)) {
 			set_hw_crypto_key(&desc[*seq_size],
 			set_hw_crypto_key(&desc[*seq_size],
 					  ctx_p->hw.key1_slot);
 					  ctx_p->hw.key1_slot);
 		} else {
 		} else {
@@ -513,7 +468,7 @@ ssi_blkcipher_create_setup_desc(
 		hw_desc_init(&desc[*seq_size]);
 		hw_desc_init(&desc[*seq_size]);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_config0(&desc[*seq_size], direction);
 		set_cipher_config0(&desc[*seq_size], direction);
-		if (ssi_is_hw_key(tfm)) {
+		if (cc_is_hw_key(tfm)) {
 			set_hw_crypto_key(&desc[*seq_size],
 			set_hw_crypto_key(&desc[*seq_size],
 					  ctx_p->hw.key2_slot);
 					  ctx_p->hw.key2_slot);
 		} else {
 		} else {
@@ -543,62 +498,14 @@ ssi_blkcipher_create_setup_desc(
 	}
 	}
 }
 }
 
 
-#if SSI_CC_HAS_MULTI2
-static inline void ssi_blkcipher_create_multi2_setup_desc(
-	struct crypto_tfm *tfm,
-	struct blkcipher_req_ctx *req_ctx,
-	unsigned int ivsize,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
-{
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-
-	int direction = req_ctx->gen_ctx.op_type;
-	/* Load system key */
-	hw_desc_init(&desc[*seq_size]);
-	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
-	set_cipher_config0(&desc[*seq_size], direction);
-	set_din_type(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
-		     CC_MULTI2_SYSTEM_KEY_SIZE, NS_BIT);
-	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
-	set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
-	(*seq_size)++;
-
-	/* load data key */
-	hw_desc_init(&desc[*seq_size]);
-	set_din_type(&desc[*seq_size], DMA_DLLI,
-		     (ctx_p->user.key_dma_addr + CC_MULTI2_SYSTEM_KEY_SIZE),
-		     CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
-	set_multi2_num_rounds(&desc[*seq_size], ctx_p->key_round_number);
-	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
-	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
-	set_cipher_config0(&desc[*seq_size], direction);
-	set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
-	(*seq_size)++;
-
-	/* Set state */
-	hw_desc_init(&desc[*seq_size]);
-	set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
-		     ivsize, NS_BIT);
-	set_cipher_config0(&desc[*seq_size], direction);
-	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
-	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
-	set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
-	(*seq_size)++;
-}
-#endif /*SSI_CC_HAS_MULTI2*/
-
-static inline void
-ssi_blkcipher_create_data_desc(
-	struct crypto_tfm *tfm,
-	struct blkcipher_req_ctx *req_ctx,
-	struct scatterlist *dst, struct scatterlist *src,
-	unsigned int nbytes,
-	void *areq,
-	struct cc_hw_desc desc[],
-	unsigned int *seq_size)
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+				 struct blkcipher_req_ctx *req_ctx,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes,
+				 void *areq, struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
 {
 {
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	unsigned int flow_mode = ctx_p->flow_mode;
 	unsigned int flow_mode = ctx_p->flow_mode;
 
 
@@ -609,17 +516,12 @@ ssi_blkcipher_create_data_desc(
 	case S_DIN_to_DES:
 	case S_DIN_to_DES:
 		flow_mode = DIN_DES_DOUT;
 		flow_mode = DIN_DES_DOUT;
 		break;
 		break;
-#if SSI_CC_HAS_MULTI2
-	case S_DIN_to_MULTI2:
-		flow_mode = DIN_MULTI2_DOUT;
-		break;
-#endif /*SSI_CC_HAS_MULTI2*/
 	default:
 	default:
 		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
 		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
 		return;
 		return;
 	}
 	}
 	/* Process */
 	/* Process */
-	if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
 		dev_dbg(dev, " data params addr %pad length 0x%X\n",
 		dev_dbg(dev, " data params addr %pad length 0x%X\n",
 			&sg_dma_address(src), nbytes);
 			&sg_dma_address(src), nbytes);
 		dev_dbg(dev, " data params addr %pad length 0x%X\n",
 		dev_dbg(dev, " data params addr %pad length 0x%X\n",
@@ -682,68 +584,63 @@ ssi_blkcipher_create_data_desc(
 	}
 	}
 }
 }
 
 
-static int ssi_blkcipher_complete(struct device *dev,
-				  struct ssi_ablkcipher_ctx *ctx_p,
-				  struct blkcipher_req_ctx *req_ctx,
-				  struct scatterlist *dst,
-				  struct scatterlist *src,
-				  unsigned int ivsize,
-				  void *areq,
-				  void __iomem *cc_base)
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 {
 {
-	int completion_error = 0;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)cc_req;
+	struct scatterlist *dst = areq->dst;
+	struct scatterlist *src = areq->src;
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
 	struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
 	struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
 
 
-	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+	cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
 	kfree(req_ctx->iv);
 	kfree(req_ctx->iv);
 
 
-	if (areq) {
-		/*
-		 * The crypto API expects us to set the req->info to the last
-		 * ciphertext block. For encrypt, simply copy from the result.
-		 * For decrypt, we must copy from a saved buffer since this
-		 * could be an in-place decryption operation and the src is
-		 * lost by this point.
-		 */
-		if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
-			memcpy(req->info, req_ctx->backup_info, ivsize);
-			kfree(req_ctx->backup_info);
-		} else {
-			scatterwalk_map_and_copy(req->info, req->dst,
-						 (req->nbytes - ivsize),
-						 ivsize, 0);
-		}
-
-		ablkcipher_request_complete(areq, completion_error);
-		return 0;
+	/*
+	 * The crypto API expects us to set the req->info to the last
+	 * ciphertext block. For encrypt, simply copy from the result.
+	 * For decrypt, we must copy from a saved buffer since this
+	 * could be an in-place decryption operation and the src is
+	 * lost by this point.
+	 */
+	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
+		memcpy(req->info, req_ctx->backup_info, ivsize);
+		kfree(req_ctx->backup_info);
+	} else if (!err) {
+		scatterwalk_map_and_copy(req->info, req->dst,
+					 (req->nbytes - ivsize), ivsize, 0);
 	}
 	}
-	return completion_error;
+
+	ablkcipher_request_complete(areq, err);
 }
 }
 
 
-static int ssi_blkcipher_process(
-	struct crypto_tfm *tfm,
-	struct blkcipher_req_ctx *req_ctx,
-	struct scatterlist *dst, struct scatterlist *src,
-	unsigned int nbytes,
-	void *info, //req info
-	unsigned int ivsize,
-	void *areq,
-	enum drv_crypto_direction direction)
+static int cc_cipher_process(struct ablkcipher_request *req,
+			     enum drv_crypto_direction direction)
 {
 {
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *src = req->src;
+	unsigned int nbytes = req->nbytes;
+	void *info = req->info;
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
 	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
-	struct ssi_crypto_req ssi_req = {};
+	struct cc_crypto_req cc_req = {};
 	int rc, seq_len = 0, cts_restore_flag = 0;
 	int rc, seq_len = 0, cts_restore_flag = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
 
 
-	dev_dbg(dev, "%s areq=%p info=%p nbytes=%d\n",
+	dev_dbg(dev, "%s req=%p info=%p nbytes=%d\n",
 		((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 		((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
-		"Encrypt" : "Decrypt"), areq, info, nbytes);
+		"Encrypt" : "Decrypt"), req, info, nbytes);
 
 
 	/* STAT_PHASE_0: Init and sanity checks */
 	/* STAT_PHASE_0: Init and sanity checks */
 
 
 	/* TODO: check data length according to mode */
 	/* TODO: check data length according to mode */
-	if (unlikely(validate_data_size(ctx_p, nbytes))) {
+	if (validate_data_size(ctx_p, nbytes)) {
 		dev_err(dev, "Unsupported data size %d.\n", nbytes);
 		dev_err(dev, "Unsupported data size %d.\n", nbytes);
 		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
 		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
 		rc = -EINVAL;
 		rc = -EINVAL;
@@ -758,7 +655,7 @@ static int ssi_blkcipher_process(
 	/* The IV we are handed may be allocted from the stack so
 	/* The IV we are handed may be allocted from the stack so
 	 * we must copy it to a DMAable buffer before use.
 	 * we must copy it to a DMAable buffer before use.
 	 */
 	 */
-	req_ctx->iv = kmalloc(ivsize, GFP_KERNEL);
+	req_ctx->iv = kmalloc(ivsize, flags);
 	if (!req_ctx->iv) {
 	if (!req_ctx->iv) {
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto exit_process;
 		goto exit_process;
@@ -766,17 +663,18 @@ static int ssi_blkcipher_process(
 	memcpy(req_ctx->iv, info, ivsize);
 	memcpy(req_ctx->iv, info, ivsize);
 
 
 	/*For CTS in case of data size aligned to 16 use CBC mode*/
 	/*For CTS in case of data size aligned to 16 use CBC mode*/
-	if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
+	if (((nbytes % AES_BLOCK_SIZE) == 0) &&
+	    ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
 		ctx_p->cipher_mode = DRV_CIPHER_CBC;
 		ctx_p->cipher_mode = DRV_CIPHER_CBC;
 		cts_restore_flag = 1;
 		cts_restore_flag = 1;
 	}
 	}
 
 
 	/* Setup DX request structure */
 	/* Setup DX request structure */
-	ssi_req.user_cb = (void *)ssi_ablkcipher_complete;
-	ssi_req.user_arg = (void *)areq;
+	cc_req.user_cb = (void *)cc_cipher_complete;
+	cc_req.user_arg = (void *)req;
 
 
 #ifdef ENABLE_CYCLE_COUNT
 #ifdef ENABLE_CYCLE_COUNT
-	ssi_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+	cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
 		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
 		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
 
 
 #endif
 #endif
@@ -786,10 +684,9 @@ static int ssi_blkcipher_process(
 
 
 	/* STAT_PHASE_1: Map buffers */
 	/* STAT_PHASE_1: Map buffers */
 
 
-	rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx,
-						  ivsize, nbytes, req_ctx->iv,
-						  src, dst);
-	if (unlikely(rc != 0)) {
+	rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+				      req_ctx->iv, src, dst, flags);
+	if (rc) {
 		dev_err(dev, "map_request() failed\n");
 		dev_err(dev, "map_request() failed\n");
 		goto exit_process;
 		goto exit_process;
 	}
 	}
@@ -797,50 +694,35 @@ static int ssi_blkcipher_process(
 	/* STAT_PHASE_2: Create sequence */
 	/* STAT_PHASE_2: Create sequence */
 
 
 	/* Setup processing */
 	/* Setup processing */
-#if SSI_CC_HAS_MULTI2
-	if (ctx_p->flow_mode == S_DIN_to_MULTI2)
-		ssi_blkcipher_create_multi2_setup_desc(tfm, req_ctx, ivsize,
-						       desc, &seq_len);
-	else
-#endif /*SSI_CC_HAS_MULTI2*/
-		ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes,
-						desc, &seq_len);
+	cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
 	/* Data processing */
 	/* Data processing */
-	ssi_blkcipher_create_data_desc(tfm, req_ctx, dst, src, nbytes, areq,
-				       desc, &seq_len);
+	cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+			     &seq_len);
 
 
 	/* do we need to generate IV? */
 	/* do we need to generate IV? */
 	if (req_ctx->is_giv) {
 	if (req_ctx->is_giv) {
-		ssi_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
-		ssi_req.ivgen_dma_addr_len = 1;
+		cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+		cc_req.ivgen_dma_addr_len = 1;
 		/* set the IV size (8/16 B long)*/
 		/* set the IV size (8/16 B long)*/
-		ssi_req.ivgen_size = ivsize;
+		cc_req.ivgen_size = ivsize;
 	}
 	}
 
 
 	/* STAT_PHASE_3: Lock HW and push sequence */
 	/* STAT_PHASE_3: Lock HW and push sequence */
 
 
-	rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (!areq) ? 0 : 1);
-	if (areq) {
-		if (unlikely(rc != -EINPROGRESS)) {
-			/* Failed to send the request or request completed synchronously */
-			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
-		}
-
-	} else {
-		if (rc != 0) {
-			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
-		} else {
-			rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
-						    src, ivsize, NULL,
-						    ctx_p->drvdata->cc_base);
-		}
+	rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+			     &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		/* Failed to send the request or request completed
+		 * synchronously
+		 */
+		cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
 	}
 	}
 
 
 exit_process:
 exit_process:
-	if (cts_restore_flag != 0)
+	if (cts_restore_flag)
 		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
 		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
 
 
-	if (rc != -EINPROGRESS) {
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
 		kfree(req_ctx->backup_info);
 		kfree(req_ctx->backup_info);
 		kfree(req_ctx->iv);
 		kfree(req_ctx->iv);
 	}
 	}
@@ -848,60 +730,28 @@ exit_process:
 	return rc;
 	return rc;
 }
 }
 
 
-static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
-{
-	struct ablkcipher_request *areq = (struct ablkcipher_request *)ssi_req;
-	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_ablkcipher_ctx(tfm);
-	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
-
-	ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
-			       ivsize, areq, cc_base);
-}
-
-/* Async wrap functions */
-
-static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
-{
-	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
-
-	ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
-
-	return ssi_blkcipher_init(tfm);
-}
-
-static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
-				 const u8 *key,
-				 unsigned int keylen)
-{
-	return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
-}
-
-static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int cc_cipher_encrypt(struct ablkcipher_request *req)
 {
 {
-	struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
 	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
 
 
 	req_ctx->is_giv = false;
 	req_ctx->is_giv = false;
+	req_ctx->backup_info = NULL;
 
 
-	return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 }
 }
 
 
-static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int cc_cipher_decrypt(struct ablkcipher_request *req)
 {
 {
 	struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
 	struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
 	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
 	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+	gfp_t flags = cc_gfp_flags(&req->base);
 
 
 	/*
 	/*
 	 * Allocate and save the last IV sized bytes of the source, which will
 	 * Allocate and save the last IV sized bytes of the source, which will
 	 * be lost in case of in-place decryption and might be needed for CTS.
 	 * be lost in case of in-place decryption and might be needed for CTS.
 	 */
 	 */
-	req_ctx->backup_info = kmalloc(ivsize, GFP_KERNEL);
+	req_ctx->backup_info = kmalloc(ivsize, flags);
 	if (!req_ctx->backup_info)
 	if (!req_ctx->backup_info)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
@@ -909,22 +759,20 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
 				 (req->nbytes - ivsize), ivsize, 0);
 				 (req->nbytes - ivsize), ivsize, 0);
 	req_ctx->is_giv = false;
 	req_ctx->is_giv = false;
 
 
-	return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 }
 }
 
 
 /* DX Block cipher alg */
 /* DX Block cipher alg */
-static struct ssi_alg_template blkcipher_algs[] = {
-/* Async template */
-#if SSI_CC_HAS_AES_XTS
+static struct cc_alg_template blkcipher_algs[] = {
 	{
 	{
 		.name = "xts(aes)",
 		.name = "xts(aes)",
 		.driver_name = "xts-aes-dx",
 		.driver_name = "xts-aes-dx",
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -939,9 +787,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -955,9 +803,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -965,17 +813,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.cipher_mode = DRV_CIPHER_XTS,
 		.cipher_mode = DRV_CIPHER_XTS,
 		.flow_mode = S_DIN_to_AES,
 		.flow_mode = S_DIN_to_AES,
 	},
 	},
-#endif /*SSI_CC_HAS_AES_XTS*/
-#if SSI_CC_HAS_AES_ESSIV
 	{
 	{
 		.name = "essiv(aes)",
 		.name = "essiv(aes)",
 		.driver_name = "essiv-aes-dx",
 		.driver_name = "essiv-aes-dx",
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -989,9 +835,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1005,9 +851,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1015,17 +861,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.cipher_mode = DRV_CIPHER_ESSIV,
 		.cipher_mode = DRV_CIPHER_ESSIV,
 		.flow_mode = S_DIN_to_AES,
 		.flow_mode = S_DIN_to_AES,
 	},
 	},
-#endif /*SSI_CC_HAS_AES_ESSIV*/
-#if SSI_CC_HAS_AES_BITLOCKER
 	{
 	{
 		.name = "bitlocker(aes)",
 		.name = "bitlocker(aes)",
 		.driver_name = "bitlocker-aes-dx",
 		.driver_name = "bitlocker-aes-dx",
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1039,9 +883,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1055,9 +899,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1065,16 +909,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.cipher_mode = DRV_CIPHER_BITLOCKER,
 		.cipher_mode = DRV_CIPHER_BITLOCKER,
 		.flow_mode = S_DIN_to_AES,
 		.flow_mode = S_DIN_to_AES,
 	},
 	},
-#endif /*SSI_CC_HAS_AES_BITLOCKER*/
 	{
 	{
 		.name = "ecb(aes)",
 		.name = "ecb(aes)",
 		.driver_name = "ecb-aes-dx",
 		.driver_name = "ecb-aes-dx",
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = 0,
 			.ivsize = 0,
@@ -1088,9 +931,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1104,9 +947,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1114,16 +957,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.cipher_mode = DRV_CIPHER_OFB,
 		.cipher_mode = DRV_CIPHER_OFB,
 		.flow_mode = S_DIN_to_AES,
 		.flow_mode = S_DIN_to_AES,
 	},
 	},
-#if SSI_CC_HAS_AES_CTS
 	{
 	{
 		.name = "cts1(cbc(aes))",
 		.name = "cts1(cbc(aes))",
 		.driver_name = "cts1-cbc-aes-dx",
 		.driver_name = "cts1-cbc-aes-dx",
 		.blocksize = AES_BLOCK_SIZE,
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1131,16 +973,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.cipher_mode = DRV_CIPHER_CBC_CTS,
 		.cipher_mode = DRV_CIPHER_CBC_CTS,
 		.flow_mode = S_DIN_to_AES,
 		.flow_mode = S_DIN_to_AES,
 	},
 	},
-#endif
 	{
 	{
 		.name = "ctr(aes)",
 		.name = "ctr(aes)",
 		.driver_name = "ctr-aes-dx",
 		.driver_name = "ctr-aes-dx",
 		.blocksize = 1,
 		.blocksize = 1,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -1154,9 +995,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = DES3_EDE_KEY_SIZE,
 			.min_keysize = DES3_EDE_KEY_SIZE,
 			.max_keysize = DES3_EDE_KEY_SIZE,
 			.max_keysize = DES3_EDE_KEY_SIZE,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -1170,9 +1011,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = DES3_EDE_KEY_SIZE,
 			.min_keysize = DES3_EDE_KEY_SIZE,
 			.max_keysize = DES3_EDE_KEY_SIZE,
 			.max_keysize = DES3_EDE_KEY_SIZE,
 			.ivsize = 0,
 			.ivsize = 0,
@@ -1186,9 +1027,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = DES_BLOCK_SIZE,
 		.blocksize = DES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = DES_KEY_SIZE,
 			.min_keysize = DES_KEY_SIZE,
 			.max_keysize = DES_KEY_SIZE,
 			.max_keysize = DES_KEY_SIZE,
 			.ivsize = DES_BLOCK_SIZE,
 			.ivsize = DES_BLOCK_SIZE,
@@ -1202,9 +1043,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.blocksize = DES_BLOCK_SIZE,
 		.blocksize = DES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_ablkcipher = {
 		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
 			.min_keysize = DES_KEY_SIZE,
 			.min_keysize = DES_KEY_SIZE,
 			.max_keysize = DES_KEY_SIZE,
 			.max_keysize = DES_KEY_SIZE,
 			.ivsize = 0,
 			.ivsize = 0,
@@ -1212,47 +1053,13 @@ static struct ssi_alg_template blkcipher_algs[] = {
 		.cipher_mode = DRV_CIPHER_ECB,
 		.cipher_mode = DRV_CIPHER_ECB,
 		.flow_mode = S_DIN_to_DES,
 		.flow_mode = S_DIN_to_DES,
 	},
 	},
-#if SSI_CC_HAS_MULTI2
-	{
-		.name = "cbc(multi2)",
-		.driver_name = "cbc-multi2-dx",
-		.blocksize = CC_MULTI2_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
-		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_decrypt,
-			.min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
-			.max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
-			.ivsize = CC_MULTI2_IV_SIZE,
-			},
-		.cipher_mode = DRV_MULTI2_CBC,
-		.flow_mode = S_DIN_to_MULTI2,
-	},
-	{
-		.name = "ofb(multi2)",
-		.driver_name = "ofb-multi2-dx",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
-		.template_ablkcipher = {
-			.setkey = ssi_ablkcipher_setkey,
-			.encrypt = ssi_ablkcipher_encrypt,
-			.decrypt = ssi_ablkcipher_encrypt,
-			.min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
-			.max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
-			.ivsize = CC_MULTI2_IV_SIZE,
-			},
-		.cipher_mode = DRV_MULTI2_OFB,
-		.flow_mode = S_DIN_to_MULTI2,
-	},
-#endif /*SSI_CC_HAS_MULTI2*/
 };
 };
 
 
 static
 static
-struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
-						 *template, struct device *dev)
+struct cc_crypto_alg *cc_cipher_create_alg(struct cc_alg_template *template,
+					   struct device *dev)
 {
 {
-	struct ssi_crypto_alg *t_alg;
+	struct cc_crypto_alg *t_alg;
 	struct crypto_alg *alg;
 	struct crypto_alg *alg;
 
 
 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
 	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
@@ -1265,13 +1072,13 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 		 template->driver_name);
 		 template->driver_name);
 	alg->cra_module = THIS_MODULE;
 	alg->cra_module = THIS_MODULE;
-	alg->cra_priority = SSI_CRA_PRIO;
+	alg->cra_priority = CC_CRA_PRIO;
 	alg->cra_blocksize = template->blocksize;
 	alg->cra_blocksize = template->blocksize;
 	alg->cra_alignmask = 0;
 	alg->cra_alignmask = 0;
-	alg->cra_ctxsize = sizeof(struct ssi_ablkcipher_ctx);
+	alg->cra_ctxsize = sizeof(struct cc_cipher_ctx);
 
 
-	alg->cra_init = ssi_ablkcipher_init;
-	alg->cra_exit = ssi_blkcipher_exit;
+	alg->cra_init = cc_cipher_init;
+	alg->cra_exit = cc_cipher_exit;
 	alg->cra_type = &crypto_ablkcipher_type;
 	alg->cra_type = &crypto_ablkcipher_type;
 	alg->cra_ablkcipher = template->template_ablkcipher;
 	alg->cra_ablkcipher = template->template_ablkcipher;
 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
@@ -1283,11 +1090,11 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
 	return t_alg;
 	return t_alg;
 }
 }
 
 
-int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
+int cc_cipher_free(struct cc_drvdata *drvdata)
 {
 {
-	struct ssi_crypto_alg *t_alg, *n;
-	struct ssi_blkcipher_handle *blkcipher_handle =
-						drvdata->blkcipher_handle;
+	struct cc_crypto_alg *t_alg, *n;
+	struct cc_cipher_handle *blkcipher_handle = drvdata->blkcipher_handle;
+
 	if (blkcipher_handle) {
 	if (blkcipher_handle) {
 		/* Remove registered algs */
 		/* Remove registered algs */
 		list_for_each_entry_safe(t_alg, n,
 		list_for_each_entry_safe(t_alg, n,
@@ -1303,10 +1110,10 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
 	return 0;
 	return 0;
 }
 }
 
 
-int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
 {
 {
-	struct ssi_blkcipher_handle *ablkcipher_handle;
-	struct ssi_crypto_alg *t_alg;
+	struct cc_cipher_handle *ablkcipher_handle;
+	struct cc_crypto_alg *t_alg;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
 	int rc = -ENOMEM;
 	int rc = -ENOMEM;
 	int alg;
 	int alg;
@@ -1323,7 +1130,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
 		ARRAY_SIZE(blkcipher_algs));
 		ARRAY_SIZE(blkcipher_algs));
 	for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
 	for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
 		dev_dbg(dev, "creating %s\n", blkcipher_algs[alg].driver_name);
 		dev_dbg(dev, "creating %s\n", blkcipher_algs[alg].driver_name);
-		t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg], dev);
+		t_alg = cc_cipher_create_alg(&blkcipher_algs[alg], dev);
 		if (IS_ERR(t_alg)) {
 		if (IS_ERR(t_alg)) {
 			rc = PTR_ERR(t_alg);
 			rc = PTR_ERR(t_alg);
 			dev_err(dev, "%s alg allocation failed\n",
 			dev_err(dev, "%s alg allocation failed\n",
@@ -1337,7 +1144,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
 		rc = crypto_register_alg(&t_alg->crypto_alg);
 		rc = crypto_register_alg(&t_alg->crypto_alg);
 		dev_dbg(dev, "%s alg registration rc = %x\n",
 		dev_dbg(dev, "%s alg registration rc = %x\n",
 			t_alg->crypto_alg.cra_driver_name, rc);
 			t_alg->crypto_alg.cra_driver_name, rc);
-		if (unlikely(rc != 0)) {
+		if (rc) {
 			dev_err(dev, "%s alg registration failed\n",
 			dev_err(dev, "%s alg registration failed\n",
 				t_alg->crypto_alg.cra_driver_name);
 				t_alg->crypto_alg.cra_driver_name);
 			kfree(t_alg);
 			kfree(t_alg);
@@ -1352,6 +1159,6 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
 	return 0;
 	return 0;
 
 
 fail0:
 fail0:
-	ssi_ablkcipher_free(drvdata);
+	cc_cipher_free(drvdata);
 	return rc;
 	return rc;
 }
 }

+ 74 - 0
drivers/staging/ccree/cc_cipher.h

@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0	BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1	BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2	BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3	BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B	BIT(4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE1 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE2 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+struct blkcipher_req_ctx {
+	struct async_gen_req_ctx gen_ctx;
+	enum cc_req_dma_buf_type dma_buf_type;
+	u32 in_nents;
+	u32 in_mlli_nents;
+	u32 out_nents;
+	u32 out_mlli_nents;
+	u8 *backup_info; /*store iv for generated IV flow*/
+	u8 *iv;
+	bool is_giv;
+	struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+#ifndef CRYPTO_ALG_BULK_MASK
+
+#define CRYPTO_ALG_BULK_DU_512	0x00002000
+#define CRYPTO_ALG_BULK_DU_4096	0x00004000
+#define CRYPTO_ALG_BULK_MASK	(CRYPTO_ALG_BULK_DU_512 |\
+				CRYPTO_ALG_BULK_DU_4096)
+#endif /* CRYPTO_ALG_BULK_MASK */
+
+#ifdef CRYPTO_TFM_REQ_HW_KEY
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+	return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
+}
+
+#else
+
+struct arm_hw_key_info {
+	int hw_key1;
+	int hw_key2;
+};
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+	return false;
+}
+
+#endif /* CRYPTO_TFM_REQ_HW_KEY */
+
+#endif /*__CC_CIPHER_H__*/

+ 4 - 34
drivers/staging/ccree/cc_crypto_ctx.h

@@ -1,18 +1,5 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
 
 
 #ifndef _CC_CRYPTO_CTX_H_
 #ifndef _CC_CRYPTO_CTX_H_
 #define _CC_CRYPTO_CTX_H_
 #define _CC_CRYPTO_CTX_H_
@@ -21,7 +8,7 @@
 
 
 /* context size */
 /* context size */
 #ifndef CC_CTX_SIZE_LOG2
 #ifndef CC_CTX_SIZE_LOG2
-#if (CC_SUPPORT_SHA > 256)
+#if (CC_DEV_SHA_MAX > 256)
 #define CC_CTX_SIZE_LOG2 8
 #define CC_CTX_SIZE_LOG2 8
 #else
 #else
 #define CC_CTX_SIZE_LOG2 7
 #define CC_CTX_SIZE_LOG2 7
@@ -72,7 +59,7 @@
 #define CC_SHA384_BLOCK_SIZE 128
 #define CC_SHA384_BLOCK_SIZE 128
 #define CC_SHA512_BLOCK_SIZE 128
 #define CC_SHA512_BLOCK_SIZE 128
 
 
-#if (CC_SUPPORT_SHA > 256)
+#if (CC_DEV_SHA_MAX > 256)
 #define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
 #define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
 #define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
 #define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
 #else /* Only up to SHA256 */
 #else /* Only up to SHA256 */
@@ -82,15 +69,6 @@
 
 
 #define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
 #define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
 
 
-#define CC_MULTI2_SYSTEM_KEY_SIZE		32
-#define CC_MULTI2_DATA_KEY_SIZE		8
-#define CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE \
-		(CC_MULTI2_SYSTEM_KEY_SIZE + CC_MULTI2_DATA_KEY_SIZE)
-#define	CC_MULTI2_BLOCK_SIZE					8
-#define	CC_MULTI2_IV_SIZE					8
-#define	CC_MULTI2_MIN_NUM_ROUNDS				8
-#define	CC_MULTI2_MAX_NUM_ROUNDS				128
-
 #define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
 #define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
 
 
 enum drv_engine_type {
 enum drv_engine_type {
@@ -168,14 +146,6 @@ enum drv_hash_hw_mode {
 	DRV_HASH_HW_RESERVE32B = S32_MAX
 	DRV_HASH_HW_RESERVE32B = S32_MAX
 };
 };
 
 
-enum drv_multi2_mode {
-	DRV_MULTI2_NULL = -1,
-	DRV_MULTI2_ECB = 0,
-	DRV_MULTI2_CBC = 1,
-	DRV_MULTI2_OFB = 2,
-	DRV_MULTI2_RESERVE32B = S32_MAX
-};
-
 /* drv_crypto_key_type[1:0] is mapped to cipher_do[1:0] */
 /* drv_crypto_key_type[1:0] is mapped to cipher_do[1:0] */
 /* drv_crypto_key_type[2] is mapped to cipher_config2 */
 /* drv_crypto_key_type[2] is mapped to cipher_config2 */
 enum drv_crypto_key_type {
 enum drv_crypto_key_type {

+ 101 - 0
drivers/staging/ccree/cc_debugfs.c

@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/stringify.h>
+#include "cc_driver.h"
+#include "cc_crypto_ctx.h"
+#include "cc_debugfs.h"
+
+struct cc_debugfs_ctx {
+	struct dentry *dir;
+};
+
+#define CC_DEBUG_REG(_X) {	\
+	.name = __stringify(_X),\
+	.offset = CC_REG(_X)	\
+	}
+
+/*
+ * This is a global var for the dentry of the
+ * debugfs ccree/ dir. It is not tied down to
+ * a specific instance of ccree, hence it is
+ * global.
+ */
+static struct dentry *cc_debugfs_dir;
+
+static struct debugfs_reg32 debug_regs[] = {
+	CC_DEBUG_REG(HOST_SIGNATURE),
+	CC_DEBUG_REG(HOST_IRR),
+	CC_DEBUG_REG(HOST_POWER_DOWN_EN),
+	CC_DEBUG_REG(AXIM_MON_ERR),
+	CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
+	CC_DEBUG_REG(HOST_IMR),
+	CC_DEBUG_REG(AXIM_CFG),
+	CC_DEBUG_REG(AXIM_CACHE_PARAMS),
+	CC_DEBUG_REG(HOST_VERSION),
+	CC_DEBUG_REG(GPR_HOST),
+	CC_DEBUG_REG(AXIM_MON_COMP),
+};
+
+int __init cc_debugfs_global_init(void)
+{
+	cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+
+	return !cc_debugfs_dir;
+}
+
+void __exit cc_debugfs_global_fini(void)
+{
+	debugfs_remove(cc_debugfs_dir);
+}
+
+int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct cc_debugfs_ctx *ctx;
+	struct debugfs_regset32 *regset;
+	struct dentry *file;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+	if (!regset)
+		return -ENOMEM;
+
+	regset->regs = debug_regs;
+	regset->nregs = ARRAY_SIZE(debug_regs);
+	regset->base = drvdata->cc_base;
+
+	ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
+	if (!ctx->dir)
+		return -ENFILE;
+
+	file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+	if (!file) {
+		debugfs_remove(ctx->dir);
+		return -ENFILE;
+	}
+
+	file = debugfs_create_bool("coherent", 0400, ctx->dir,
+				   &drvdata->coherent);
+
+	if (!file) {
+		debugfs_remove_recursive(ctx->dir);
+		return -ENFILE;
+	}
+
+	drvdata->debugfs = ctx;
+
+	return 0;
+}
+
+void cc_debugfs_fini(struct cc_drvdata *drvdata)
+{
+	struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
+
+	debugfs_remove_recursive(ctx->dir);
+}

+ 32 - 0
drivers/staging/ccree/cc_debugfs.h

@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_DEBUGFS_H__
+#define __CC_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int cc_debugfs_global_init(void);
+void cc_debugfs_global_fini(void);
+
+int cc_debugfs_init(struct cc_drvdata *drvdata);
+void cc_debugfs_fini(struct cc_drvdata *drvdata);
+
+#else
+
+static inline int cc_debugfs_global_init(void)
+{
+	return 0;
+}
+
+static inline void cc_debugfs_global_fini(void) {}
+
+static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+	return 0;
+}
+
+static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}
+
+#endif
+
+#endif /*__CC_SYSFS_H__*/

+ 141 - 205
drivers/staging/ccree/ssi_driver.c → drivers/staging/ccree/cc_driver.c

@@ -1,96 +1,56 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/module.h>
 
 
 #include <linux/crypto.h>
 #include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/aes.h>
-#include <crypto/sha.h>
-#include <crypto/aead.h>
-#include <crypto/authenc.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/skcipher.h>
-
-#include <linux/init.h>
 #include <linux/moduleparam.h>
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/types.h>
-#include <linux/random.h>
-#include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
-#include <linux/fcntl.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/mutex.h>
-#include <linux/sysctl.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
-#include <linux/pm.h>
-
-/* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
-#include <linux/cache.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/random.h>
 #include <linux/of.h>
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/clk.h>
 #include <linux/of_address.h>
 #include <linux/of_address.h>
 
 
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_request_mgr.h"
-#include "ssi_buffer_mgr.h"
-#include "ssi_sysfs.h"
-#include "ssi_cipher.h"
-#include "ssi_aead.h"
-#include "ssi_hash.h"
-#include "ssi_ivgen.h"
-#include "ssi_sram_mgr.h"
-#include "ssi_pm.h"
-#include "ssi_fips.h"
-
-#ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const u8 *buf, size_t len)
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_debugfs.h"
+#include "cc_cipher.h"
+#include "cc_aead.h"
+#include "cc_hash.h"
+#include "cc_ivgen.h"
+#include "cc_sram_mgr.h"
+#include "cc_pm.h"
+#include "cc_fips.h"
+
+bool cc_dump_desc;
+module_param_named(dump_desc, cc_dump_desc, bool, 0600);
+MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
+
+bool cc_dump_bytes;
+module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
+MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len)
 {
 {
-	char prefix[NAME_LEN];
+	char prefix[64];
 
 
 	if (!buf)
 	if (!buf)
 		return;
 		return;
 
 
-	snprintf(prefix, sizeof(prefix), "%s[%lu]: ", name, len);
+	snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
 
 
-	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, len,
-		       false);
+	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+		       len, false);
 }
 }
-#endif
 
 
 static irqreturn_t cc_isr(int irq, void *dev_id)
 static irqreturn_t cc_isr(int irq, void *dev_id)
 {
 {
-	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
+	struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
 	u32 irr;
 	u32 irr;
 	u32 imr;
 	u32 imr;
@@ -100,7 +60,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 	/* read the interrupt status */
 	/* read the interrupt status */
 	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
 	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
 	dev_dbg(dev, "Got IRR=0x%08X\n", irr);
 	dev_dbg(dev, "Got IRR=0x%08X\n", irr);
-	if (unlikely(irr == 0)) { /* Probably shared interrupt line */
+	if (irr == 0) { /* Probably shared interrupt line */
 		dev_err(dev, "Got interrupt with empty IRR\n");
 		dev_err(dev, "Got interrupt with empty IRR\n");
 		return IRQ_NONE;
 		return IRQ_NONE;
 	}
 	}
@@ -111,23 +71,27 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 
 
 	drvdata->irq = irr;
 	drvdata->irq = irr;
 	/* Completion interrupt - most probable */
 	/* Completion interrupt - most probable */
-	if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
-		/* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
-		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_COMP_IRQ_MASK);
-		irr &= ~SSI_COMP_IRQ_MASK;
+	if (irr & CC_COMP_IRQ_MASK) {
+		/* Mask AXI completion interrupt - will be unmasked in
+		 * Deferred service handler
+		 */
+		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
+		irr &= ~CC_COMP_IRQ_MASK;
 		complete_request(drvdata);
 		complete_request(drvdata);
 	}
 	}
-#ifdef CC_SUPPORT_FIPS
+#ifdef CONFIG_CRYPTO_FIPS
 	/* TEE FIPS interrupt */
 	/* TEE FIPS interrupt */
-	if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
-		/* Mask interrupt - will be unmasked in Deferred service handler */
-		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
-		irr &= ~SSI_GPR0_IRQ_MASK;
+	if (irr & CC_GPR0_IRQ_MASK) {
+		/* Mask interrupt - will be unmasked in Deferred service
+		 * handler
+		 */
+		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
+		irr &= ~CC_GPR0_IRQ_MASK;
 		fips_handler(drvdata);
 		fips_handler(drvdata);
 	}
 	}
 #endif
 #endif
 	/* AXI error interrupt */
 	/* AXI error interrupt */
-	if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
+	if (irr & CC_AXI_ERR_IRQ_MASK) {
 		u32 axi_err;
 		u32 axi_err;
 
 
 		/* Read the AXI error ID */
 		/* Read the AXI error ID */
@@ -135,10 +99,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
 		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
 			axi_err);
 			axi_err);
 
 
-		irr &= ~SSI_AXI_ERR_IRQ_MASK;
+		irr &= ~CC_AXI_ERR_IRQ_MASK;
 	}
 	}
 
 
-	if (unlikely(irr != 0)) {
+	if (irr) {
 		dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
 		dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
 			irr);
 			irr);
 		/* Just warning */
 		/* Just warning */
@@ -147,14 +111,14 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 	return IRQ_HANDLED;
 }
 }
 
 
-int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
 {
 {
 	unsigned int val, cache_params;
 	unsigned int val, cache_params;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
 
 
 	/* Unmask all AXI interrupt sources AXI_CFG1 register */
 	/* Unmask all AXI interrupt sources AXI_CFG1 register */
 	val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
 	val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
-	cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
+	cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
 	dev_dbg(dev, "AXIM_CFG=0x%08X\n",
 	dev_dbg(dev, "AXIM_CFG=0x%08X\n",
 		cc_ioread(drvdata, CC_REG(AXIM_CFG)));
 		cc_ioread(drvdata, CC_REG(AXIM_CFG)));
 
 
@@ -164,21 +128,10 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
 	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
 	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
 
 
 	/* Unmask relevant interrupt cause */
 	/* Unmask relevant interrupt cause */
-	val = (unsigned int)(~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK |
-			       SSI_GPR0_IRQ_MASK));
+	val = (unsigned int)(~(CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK |
+			       CC_GPR0_IRQ_MASK));
 	cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
 	cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
 
 
-#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
-#ifdef DX_IRQ_DELAY
-	/* Set CC IRQ delay */
-	cc_iowrite(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL), DX_IRQ_DELAY);
-#endif
-	if (cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)) > 0) {
-		dev_dbg(dev, "irq_delay=%d CC cycles\n",
-			cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)));
-	}
-#endif
-
 	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
 	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
 
 
 	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
 	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
@@ -199,12 +152,11 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
 static int init_cc_resources(struct platform_device *plat_dev)
 static int init_cc_resources(struct platform_device *plat_dev)
 {
 {
 	struct resource *req_mem_cc_regs = NULL;
 	struct resource *req_mem_cc_regs = NULL;
-	void __iomem *cc_base = NULL;
-	struct ssi_drvdata *new_drvdata;
+	struct cc_drvdata *new_drvdata;
 	struct device *dev = &plat_dev->dev;
 	struct device *dev = &plat_dev->dev;
 	struct device_node *np = dev->of_node;
 	struct device_node *np = dev->of_node;
 	u32 signature_val;
 	u32 signature_val;
-	dma_addr_t dma_mask;
+	u64 dma_mask;
 	int rc = 0;
 	int rc = 0;
 
 
 	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
 	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -222,18 +174,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
 	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
 	/* Map registers space */
 	/* Map registers space */
 	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
 	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
-	if (IS_ERR(new_drvdata->cc_base)) {
-		dev_err(dev, "Failed to ioremap registers");
+	if (IS_ERR(new_drvdata->cc_base))
 		return PTR_ERR(new_drvdata->cc_base);
 		return PTR_ERR(new_drvdata->cc_base);
-	}
 
 
 	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
 	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
 		req_mem_cc_regs);
 		req_mem_cc_regs);
 	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
 	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
 		&req_mem_cc_regs->start, new_drvdata->cc_base);
 		&req_mem_cc_regs->start, new_drvdata->cc_base);
 
 
-	cc_base = new_drvdata->cc_base;
-
 	/* Then IRQ */
 	/* Then IRQ */
 	new_drvdata->irq = platform_get_irq(plat_dev, 0);
 	new_drvdata->irq = platform_get_irq(plat_dev, 0);
 	if (new_drvdata->irq < 0) {
 	if (new_drvdata->irq < 0) {
@@ -250,10 +198,12 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	}
 	}
 	dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
 	dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
 
 
+	init_completion(&new_drvdata->hw_queue_avail);
+
 	if (!plat_dev->dev.dma_mask)
 	if (!plat_dev->dev.dma_mask)
 		plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
 		plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
 
 
-	dma_mask = (dma_addr_t)(DMA_BIT_MASK(DMA_BIT_MASK_LEN));
+	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
 	while (dma_mask > 0x7fffffffUL) {
 	while (dma_mask > 0x7fffffffUL) {
 		if (dma_supported(&plat_dev->dev, dma_mask)) {
 		if (dma_supported(&plat_dev->dev, dma_mask)) {
 			rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
 			rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
@@ -264,8 +214,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	}
 	}
 
 
 	if (rc) {
 	if (rc) {
-		dev_err(dev, "Failed in dma_set_mask, mask=%par\n",
-			&dma_mask);
+		dev_err(dev, "Failed in dma_set_mask, mask=%par\n", &dma_mask);
 		return rc;
 		return rc;
 	}
 	}
 
 
@@ -277,9 +226,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
 
 
 	/* Verify correct mapping */
 	/* Verify correct mapping */
 	signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
 	signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
-	if (signature_val != DX_DEV_SIGNATURE) {
+	if (signature_val != CC_DEV_SIGNATURE) {
 		dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
 		dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
-			signature_val, (u32)DX_DEV_SIGNATURE);
+			signature_val, (u32)CC_DEV_SIGNATURE);
 		rc = -EINVAL;
 		rc = -EINVAL;
 		goto post_clk_err;
 		goto post_clk_err;
 	}
 	}
@@ -287,84 +236,82 @@ static int init_cc_resources(struct platform_device *plat_dev)
 
 
 	/* Display HW versions */
 	/* Display HW versions */
 	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
 	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
-		 SSI_DEV_NAME_STR,
+		 CC_DEV_NAME_STR,
 		 cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
 		 cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
 		 DRV_MODULE_VERSION);
 		 DRV_MODULE_VERSION);
 
 
 	rc = init_cc_regs(new_drvdata, true);
 	rc = init_cc_regs(new_drvdata, true);
-	if (unlikely(rc != 0)) {
+	if (rc) {
 		dev_err(dev, "init_cc_regs failed\n");
 		dev_err(dev, "init_cc_regs failed\n");
 		goto post_clk_err;
 		goto post_clk_err;
 	}
 	}
 
 
-#ifdef ENABLE_CC_SYSFS
-	rc = ssi_sysfs_init(&dev->kobj, new_drvdata);
-	if (unlikely(rc != 0)) {
-		dev_err(dev, "init_stat_db failed\n");
+	rc = cc_debugfs_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "Failed registering debugfs interface\n");
 		goto post_regs_err;
 		goto post_regs_err;
 	}
 	}
-#endif
 
 
-	rc = ssi_fips_init(new_drvdata);
-	if (unlikely(rc != 0)) {
-		dev_err(dev, "SSI_FIPS_INIT failed 0x%x\n", rc);
-		goto post_sysfs_err;
+	rc = cc_fips_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+		goto post_debugfs_err;
 	}
 	}
-	rc = ssi_sram_mgr_init(new_drvdata);
-	if (unlikely(rc != 0)) {
-		dev_err(dev, "ssi_sram_mgr_init failed\n");
+	rc = cc_sram_mgr_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_sram_mgr_init failed\n");
 		goto post_fips_init_err;
 		goto post_fips_init_err;
 	}
 	}
 
 
 	new_drvdata->mlli_sram_addr =
 	new_drvdata->mlli_sram_addr =
-		ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
-	if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
+		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
 		dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
 		dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto post_sram_mgr_err;
 		goto post_sram_mgr_err;
 	}
 	}
 
 
-	rc = request_mgr_init(new_drvdata);
-	if (unlikely(rc != 0)) {
-		dev_err(dev, "request_mgr_init failed\n");
+	rc = cc_req_mgr_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_req_mgr_init failed\n");
 		goto post_sram_mgr_err;
 		goto post_sram_mgr_err;
 	}
 	}
 
 
-	rc = ssi_buffer_mgr_init(new_drvdata);
-	if (unlikely(rc != 0)) {
+	rc = cc_buffer_mgr_init(new_drvdata);
+	if (rc) {
 		dev_err(dev, "buffer_mgr_init failed\n");
 		dev_err(dev, "buffer_mgr_init failed\n");
 		goto post_req_mgr_err;
 		goto post_req_mgr_err;
 	}
 	}
 
 
-	rc = ssi_power_mgr_init(new_drvdata);
-	if (unlikely(rc != 0)) {
+	rc = cc_pm_init(new_drvdata);
+	if (rc) {
 		dev_err(dev, "ssi_power_mgr_init failed\n");
 		dev_err(dev, "ssi_power_mgr_init failed\n");
 		goto post_buf_mgr_err;
 		goto post_buf_mgr_err;
 	}
 	}
 
 
-	rc = ssi_ivgen_init(new_drvdata);
-	if (unlikely(rc != 0)) {
-		dev_err(dev, "ssi_ivgen_init failed\n");
+	rc = cc_ivgen_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_ivgen_init failed\n");
 		goto post_power_mgr_err;
 		goto post_power_mgr_err;
 	}
 	}
 
 
 	/* Allocate crypto algs */
 	/* Allocate crypto algs */
-	rc = ssi_ablkcipher_alloc(new_drvdata);
-	if (unlikely(rc != 0)) {
-		dev_err(dev, "ssi_ablkcipher_alloc failed\n");
+	rc = cc_cipher_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_cipher_alloc failed\n");
 		goto post_ivgen_err;
 		goto post_ivgen_err;
 	}
 	}
 
 
 	/* hash must be allocated before aead since hash exports APIs */
 	/* hash must be allocated before aead since hash exports APIs */
-	rc = ssi_hash_alloc(new_drvdata);
-	if (unlikely(rc != 0)) {
-		dev_err(dev, "ssi_hash_alloc failed\n");
+	rc = cc_hash_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_hash_alloc failed\n");
 		goto post_cipher_err;
 		goto post_cipher_err;
 	}
 	}
 
 
-	rc = ssi_aead_alloc(new_drvdata);
-	if (unlikely(rc != 0)) {
-		dev_err(dev, "ssi_aead_alloc failed\n");
+	rc = cc_aead_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_aead_alloc failed\n");
 		goto post_hash_err;
 		goto post_hash_err;
 	}
 	}
 
 
@@ -377,25 +324,23 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	return 0;
 	return 0;
 
 
 post_hash_err:
 post_hash_err:
-	ssi_hash_free(new_drvdata);
+	cc_hash_free(new_drvdata);
 post_cipher_err:
 post_cipher_err:
-	ssi_ablkcipher_free(new_drvdata);
+	cc_cipher_free(new_drvdata);
 post_ivgen_err:
 post_ivgen_err:
-	ssi_ivgen_fini(new_drvdata);
+	cc_ivgen_fini(new_drvdata);
 post_power_mgr_err:
 post_power_mgr_err:
-	ssi_power_mgr_fini(new_drvdata);
+	cc_pm_fini(new_drvdata);
 post_buf_mgr_err:
 post_buf_mgr_err:
-	 ssi_buffer_mgr_fini(new_drvdata);
+	 cc_buffer_mgr_fini(new_drvdata);
 post_req_mgr_err:
 post_req_mgr_err:
-	request_mgr_fini(new_drvdata);
+	cc_req_mgr_fini(new_drvdata);
 post_sram_mgr_err:
 post_sram_mgr_err:
-	ssi_sram_mgr_fini(new_drvdata);
+	cc_sram_mgr_fini(new_drvdata);
 post_fips_init_err:
 post_fips_init_err:
-	ssi_fips_fini(new_drvdata);
-post_sysfs_err:
-#ifdef ENABLE_CC_SYSFS
-	ssi_sysfs_fini();
-#endif
+	cc_fips_fini(new_drvdata);
+post_debugfs_err:
+	cc_debugfs_fini(new_drvdata);
 post_regs_err:
 post_regs_err:
 	fini_cc_regs(new_drvdata);
 	fini_cc_regs(new_drvdata);
 post_clk_err:
 post_clk_err:
@@ -403,7 +348,7 @@ post_clk_err:
 	return rc;
 	return rc;
 }
 }
 
 
-void fini_cc_regs(struct ssi_drvdata *drvdata)
+void fini_cc_regs(struct cc_drvdata *drvdata)
 {
 {
 	/* Mask all interrupts */
 	/* Mask all interrupts */
 	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
 	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
@@ -411,26 +356,24 @@ void fini_cc_regs(struct ssi_drvdata *drvdata)
 
 
 static void cleanup_cc_resources(struct platform_device *plat_dev)
 static void cleanup_cc_resources(struct platform_device *plat_dev)
 {
 {
-	struct ssi_drvdata *drvdata =
-		(struct ssi_drvdata *)platform_get_drvdata(plat_dev);
-
-	ssi_aead_free(drvdata);
-	ssi_hash_free(drvdata);
-	ssi_ablkcipher_free(drvdata);
-	ssi_ivgen_fini(drvdata);
-	ssi_power_mgr_fini(drvdata);
-	ssi_buffer_mgr_fini(drvdata);
-	request_mgr_fini(drvdata);
-	ssi_sram_mgr_fini(drvdata);
-	ssi_fips_fini(drvdata);
-#ifdef ENABLE_CC_SYSFS
-	ssi_sysfs_fini();
-#endif
+	struct cc_drvdata *drvdata =
+		(struct cc_drvdata *)platform_get_drvdata(plat_dev);
+
+	cc_aead_free(drvdata);
+	cc_hash_free(drvdata);
+	cc_cipher_free(drvdata);
+	cc_ivgen_fini(drvdata);
+	cc_pm_fini(drvdata);
+	cc_buffer_mgr_fini(drvdata);
+	cc_req_mgr_fini(drvdata);
+	cc_sram_mgr_fini(drvdata);
+	cc_fips_fini(drvdata);
+	cc_debugfs_fini(drvdata);
 	fini_cc_regs(drvdata);
 	fini_cc_regs(drvdata);
 	cc_clk_off(drvdata);
 	cc_clk_off(drvdata);
 }
 }
 
 
-int cc_clk_on(struct ssi_drvdata *drvdata)
+int cc_clk_on(struct cc_drvdata *drvdata)
 {
 {
 	struct clk *clk = drvdata->clk;
 	struct clk *clk = drvdata->clk;
 	int rc;
 	int rc;
@@ -446,7 +389,7 @@ int cc_clk_on(struct ssi_drvdata *drvdata)
 	return 0;
 	return 0;
 }
 }
 
 
-void cc_clk_off(struct ssi_drvdata *drvdata)
+void cc_clk_off(struct cc_drvdata *drvdata)
 {
 {
 	struct clk *clk = drvdata->clk;
 	struct clk *clk = drvdata->clk;
 
 
@@ -461,23 +404,10 @@ static int cc7x_probe(struct platform_device *plat_dev)
 {
 {
 	int rc;
 	int rc;
 	struct device *dev = &plat_dev->dev;
 	struct device *dev = &plat_dev->dev;
-#if defined(CONFIG_ARM) && defined(CC_DEBUG)
-	u32 ctr, cacheline_size;
-
-	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
-	cacheline_size =  4 << ((ctr >> 16) & 0xf);
-	dev_dbg(dev, "CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
-		cacheline_size, L1_CACHE_BYTES);
-
-	asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
-	dev_dbg(dev, "Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
-		(ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
-		(ctr >> 20) & 0xF, ctr & 0xF);
-#endif
 
 
 	/* Map registers space */
 	/* Map registers space */
 	rc = init_cc_resources(plat_dev);
 	rc = init_cc_resources(plat_dev);
-	if (rc != 0)
+	if (rc)
 		return rc;
 		return rc;
 
 
 	dev_info(dev, "ARM ccree device initialized\n");
 	dev_info(dev, "ARM ccree device initialized\n");
@@ -498,38 +428,44 @@ static int cc7x_remove(struct platform_device *plat_dev)
 	return 0;
 	return 0;
 }
 }
 
 
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-static const struct dev_pm_ops arm_cc7x_driver_pm = {
-	SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
-};
-#endif
-
-#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-#define	DX_DRIVER_RUNTIME_PM	(&arm_cc7x_driver_pm)
-#else
-#define	DX_DRIVER_RUNTIME_PM	NULL
-#endif
-
-#ifdef CONFIG_OF
 static const struct of_device_id arm_cc7x_dev_of_match[] = {
 static const struct of_device_id arm_cc7x_dev_of_match[] = {
 	{.compatible = "arm,cryptocell-712-ree"},
 	{.compatible = "arm,cryptocell-712-ree"},
 	{}
 	{}
 };
 };
 MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);
 MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);
-#endif
 
 
 static struct platform_driver cc7x_driver = {
 static struct platform_driver cc7x_driver = {
 	.driver = {
 	.driver = {
 		   .name = "cc7xree",
 		   .name = "cc7xree",
-#ifdef CONFIG_OF
 		   .of_match_table = arm_cc7x_dev_of_match,
 		   .of_match_table = arm_cc7x_dev_of_match,
+#ifdef CONFIG_PM
+		   .pm = &ccree_pm,
 #endif
 #endif
-		   .pm = DX_DRIVER_RUNTIME_PM,
 	},
 	},
 	.probe = cc7x_probe,
 	.probe = cc7x_probe,
 	.remove = cc7x_remove,
 	.remove = cc7x_remove,
 };
 };
-module_platform_driver(cc7x_driver);
+
+static int __init ccree_init(void)
+{
+	int ret;
+
+	cc_hash_global_init();
+
+	ret = cc_debugfs_global_init();
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&cc7x_driver);
+}
+module_init(ccree_init);
+
+static void __exit ccree_exit(void)
+{
+	platform_driver_unregister(&cc7x_driver);
+	cc_debugfs_global_fini();
+}
+module_exit(ccree_exit);
 
 
 /* Module description */
 /* Module description */
 MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
 MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");

+ 194 - 0
drivers/staging/ccree/cc_driver.h

@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_driver.h
+ * ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __CC_DRIVER_H__
+#define __CC_DRIVER_H__
+
+#ifdef COMP_IN_WQ
+#include <linux/workqueue.h>
+#else
+#include <linux/interrupt.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* Registers definitions from shared/hw/ree_include */
+#include "cc_host_regs.h"
+#define CC_DEV_SHA_MAX 512
+#include "cc_crypto_ctx.h"
+#include "cc_hw_queue_defs.h"
+#include "cc_sram_mgr.h"
+
+extern bool cc_dump_desc;
+extern bool cc_dump_bytes;
+
+#define DRV_MODULE_VERSION "3.0"
+
+#define CC_DEV_NAME_STR "cc715ree"
+#define CC_COHERENT_CACHE_PARAMS 0xEEE
+
+/* Maximum DMA mask supported by IP */
+#define DMA_BIT_MASK_LEN 48
+
+#define CC_DEV_SIGNATURE 0xDCC71200UL
+
+#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
+			  (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
+			  (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
+			  (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
+
+#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+
+#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
+
+/* TEE FIPS status interrupt */
+#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
+
+#define CC_CRA_PRIO 3000
+
+#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
+
+#define MAX_REQUEST_QUEUE_SIZE 4096
+#define MAX_MLLI_BUFF_SIZE 2080
+#define MAX_ICV_NENTS_SUPPORTED 2
+
+/* Definitions for HW descriptors DIN/DOUT fields */
+#define NS_BIT 1
+#define AXI_ID 0
+/* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID
+ * field in the HW descriptor. The DMA engine +8 that value.
+ */
+
+#define CC_MAX_IVGEN_DMA_ADDRESSES	3
+struct cc_crypto_req {
+	void (*user_cb)(struct device *dev, void *req, int err);
+	void *user_arg;
+	dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
+	/* For the first 'ivgen_dma_addr_len' addresses of this array,
+	 * generated IV would be placed in it by send_request().
+	 * Same generated IV for all addresses!
+	 */
+	/* Amount of 'ivgen_dma_addr' elements to be filled. */
+	unsigned int ivgen_dma_addr_len;
+	/* The generated IV size required, 8/16 B allowed. */
+	unsigned int ivgen_size;
+	struct completion seq_compl; /* request completion */
+};
+
+/**
+ * struct cc_drvdata - driver private data context
+ * @cc_base:	virt address of the CC registers
+ * @irq:	device IRQ number
+ * @irq_mask:	Interrupt mask shadow (1 for masked interrupts)
+ * @fw_ver:	SeP loaded firmware version
+ */
+struct cc_drvdata {
+	void __iomem *cc_base;
+	int irq;
+	u32 irq_mask;
+	u32 fw_ver;
+	struct completion hw_queue_avail; /* wait for HW queue availability */
+	struct platform_device *plat_dev;
+	cc_sram_addr_t mlli_sram_addr;
+	void *buff_mgr_handle;
+	void *hash_handle;
+	void *aead_handle;
+	void *blkcipher_handle;
+	void *request_mgr_handle;
+	void *fips_handle;
+	void *ivgen_handle;
+	void *sram_mgr_handle;
+	void *debugfs;
+	struct clk *clk;
+	bool coherent;
+};
+
+struct cc_crypto_alg {
+	struct list_head entry;
+	int cipher_mode;
+	int flow_mode; /* Note: currently, refers to the cipher mode only. */
+	int auth_mode;
+	struct cc_drvdata *drvdata;
+	struct crypto_alg crypto_alg;
+	struct aead_alg aead_alg;
+};
+
+struct cc_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	u32 type;
+	union {
+		struct ablkcipher_alg ablkcipher;
+		struct aead_alg aead;
+		struct blkcipher_alg blkcipher;
+		struct cipher_alg cipher;
+		struct compress_alg compress;
+	} template_u;
+	int cipher_mode;
+	int flow_mode; /* Note: currently, refers to the cipher mode only. */
+	int auth_mode;
+	struct cc_drvdata *drvdata;
+};
+
+struct async_gen_req_ctx {
+	dma_addr_t iv_dma_addr;
+	enum drv_crypto_direction op_type;
+};
+
+static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
+{
+	return &drvdata->plat_dev->dev;
+}
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+				   size_t size)
+{
+	if (cc_dump_bytes)
+		__dump_byte_array(name, the_array, size);
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
+void fini_cc_regs(struct cc_drvdata *drvdata);
+int cc_clk_on(struct cc_drvdata *drvdata);
+void cc_clk_off(struct cc_drvdata *drvdata);
+
+static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
+{
+	iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
+{
+	return ioread32(drvdata->cc_base + reg);
+}
+
+static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
+{
+	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC;
+}
+
+#endif /*__CC_DRIVER_H__*/
+

+ 15 - 30
drivers/staging/ccree/ssi_fips.c → drivers/staging/ccree/cc_fips.c

@@ -1,36 +1,22 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
 
 
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/fips.h>
 #include <linux/fips.h>
 
 
-#include "ssi_config.h"
-#include "ssi_driver.h"
-#include "ssi_fips.h"
+#include "cc_driver.h"
+#include "cc_fips.h"
 
 
 static void fips_dsr(unsigned long devarg);
 static void fips_dsr(unsigned long devarg);
 
 
-struct ssi_fips_handle {
+struct cc_fips_handle {
 	struct tasklet_struct tasklet;
 	struct tasklet_struct tasklet;
 };
 };
 
 
 /* The function called once at driver entry point to check
 /* The function called once at driver entry point to check
  * whether TEE FIPS error occurred.
  * whether TEE FIPS error occurred.
  */
  */
-static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
+static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
 {
 {
 	u32 reg;
 	u32 reg;
 
 
@@ -42,7 +28,7 @@ static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
  * This function should push the FIPS REE library status towards the TEE library
  * This function should push the FIPS REE library status towards the TEE library
  * by writing the error state to HOST_GPR0 register.
  * by writing the error state to HOST_GPR0 register.
  */
  */
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
 {
 {
 	int val = CC_FIPS_SYNC_REE_STATUS;
 	int val = CC_FIPS_SYNC_REE_STATUS;
 
 
@@ -51,9 +37,9 @@ void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
 	cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
 	cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
 }
 }
 
 
-void ssi_fips_fini(struct ssi_drvdata *drvdata)
+void cc_fips_fini(struct cc_drvdata *drvdata)
 {
 {
-	struct ssi_fips_handle *fips_h = drvdata->fips_handle;
+	struct cc_fips_handle *fips_h = drvdata->fips_handle;
 
 
 	if (!fips_h)
 	if (!fips_h)
 		return; /* Not allocated */
 		return; /* Not allocated */
@@ -65,10 +51,9 @@ void ssi_fips_fini(struct ssi_drvdata *drvdata)
 	drvdata->fips_handle = NULL;
 	drvdata->fips_handle = NULL;
 }
 }
 
 
-void fips_handler(struct ssi_drvdata *drvdata)
+void fips_handler(struct cc_drvdata *drvdata)
 {
 {
-	struct ssi_fips_handle *fips_handle_ptr =
-		drvdata->fips_handle;
+	struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;
 
 
 	tasklet_schedule(&fips_handle_ptr->tasklet);
 	tasklet_schedule(&fips_handle_ptr->tasklet);
 }
 }
@@ -84,11 +69,11 @@ static inline void tee_fips_error(struct device *dev)
 /* Deferred service handler, run as interrupt-fired tasklet */
 /* Deferred service handler, run as interrupt-fired tasklet */
 static void fips_dsr(unsigned long devarg)
 static void fips_dsr(unsigned long devarg)
 {
 {
-	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct device *dev = drvdata_to_dev(drvdata);
 	u32 irq, state, val;
 	u32 irq, state, val;
 
 
-	irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
+	irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
 
 
 	if (irq) {
 	if (irq) {
 		state = cc_ioread(drvdata, CC_REG(GPR_HOST));
 		state = cc_ioread(drvdata, CC_REG(GPR_HOST));
@@ -105,9 +90,9 @@ static void fips_dsr(unsigned long devarg)
 }
 }
 
 
 /* The function called once at driver entry point .*/
 /* The function called once at driver entry point .*/
-int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+int cc_fips_init(struct cc_drvdata *p_drvdata)
 {
 {
-	struct ssi_fips_handle *fips_h;
+	struct cc_fips_handle *fips_h;
 	struct device *dev = drvdata_to_dev(p_drvdata);
 	struct device *dev = drvdata_to_dev(p_drvdata);
 
 
 	fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
 	fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);

部分文件因为文件数量过多而无法显示